Merge remote-tracking branch 'upstream/develop' into feature/ddalphaamg
@@ -308,32 +308,34 @@ namespace Grid {
public:
SchurStaggeredOperator (Matrix &Mat): _Mat(Mat){};
virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
GridLogIterative.TimingMode(1);
std::cout << GridLogIterative << " HermOpAndNorm "<<std::endl;
n2 = Mpc(in,out);
std::cout << GridLogIterative << " HermOpAndNorm.Mpc "<<std::endl;
ComplexD dot= innerProduct(in,out);
std::cout << GridLogIterative << " HermOpAndNorm.innerProduct "<<std::endl;
n1 = real(dot);
}
virtual void HermOp(const Field &in, Field &out){
std::cout << GridLogIterative << " HermOp "<<std::endl;
Mpc(in,out);
}
virtual RealD Mpc (const Field &in, Field &out) {
Field tmp(in._grid);
Field tmp2(in._grid);

std::cout << GridLogIterative << " HermOp.Mpc "<<std::endl;
_Mat.Mooee(in,out);
_Mat.Mooee(out,tmp);
std::cout << GridLogIterative << " HermOp.MooeeMooee "<<std::endl;

_Mat.Meooe(in,out);
_Mat.Meooe(out,tmp2);
std::cout << GridLogIterative << " HermOp.MeooeMeooe "<<std::endl;

return axpy_norm(out,-1.0,tmp2,tmp);
#if 0
//... much prefer conventional Schur norm
_Mat.Meooe(in,tmp);
_Mat.MooeeInv(tmp,out);
_Mat.Meooe(out,tmp);
_Mat.Mooee(in,out);
return axpy_norm(out,-1.0,tmp,out);
#endif
RealD nn=axpy_norm(out,-1.0,tmp2,tmp);
std::cout << GridLogIterative << " HermOp.axpy_norm "<<std::endl;
return nn;
}
virtual RealD MpcDag (const Field &in, Field &out){
return Mpc(in,out);

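The even-odd preconditioned staggered operator assembled above is Mpc = Mooee·Mooee − Meooe·Meooe, which is Hermitian and positive definite, so HermOp can forward straight to Mpc. A minimal usage sketch, assuming an already-constructed staggered operator Ds and Grid's ConjugateGradient (call site assumed, not part of this diff):

  SchurStaggeredOperator<ImprovedStaggeredFermionR,FermionField> HermOpEO(Ds);
  ConjugateGradient<FermionField> CG(1.0e-8,10000);
  CG(HermOpEO,src_o,sol_o);   // Mpc is Hermitian positive definite; plain CG applies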
@@ -123,11 +123,14 @@ namespace Grid {
Field tmp(grid);
Field Mtmp(grid);
Field resid(fgrid);


std::cout << GridLogMessage << " SchurRedBlackStaggeredSolve " <<std::endl;
pickCheckerboard(Even,src_e,in);
pickCheckerboard(Odd ,src_o,in);
pickCheckerboard(Even,sol_e,out);
pickCheckerboard(Odd ,sol_o,out);

std::cout << GridLogMessage << " SchurRedBlackStaggeredSolve checkerboards picked" <<std::endl;

/////////////////////////////////////////////////////
// src_o = (source_o - Moe MeeInv source_e)
@@ -144,6 +147,7 @@ namespace Grid {
//////////////////////////////////////////////////////////////
std::cout<<GridLogMessage << "SchurRedBlackStaggeredSolver calling the Mpc solver" <<std::endl;
_HermitianRBSolver(_HermOpEO,src_o,sol_o); assert(sol_o.checkerboard==Odd);
std::cout<<GridLogMessage << "SchurRedBlackStaggeredSolver called the Mpc solver" <<std::endl;

///////////////////////////////////////////////////
// sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
@@ -152,15 +156,16 @@ namespace Grid {
src_e = src_e-tmp; assert( src_e.checkerboard ==Even);
_Matrix.MooeeInv(src_e,sol_e); assert( sol_e.checkerboard ==Even);

std::cout<<GridLogMessage << "SchurRedBlackStaggeredSolver reconstructed other CB" <<std::endl;
setCheckerboard(out,sol_e); assert( sol_e.checkerboard ==Even);
setCheckerboard(out,sol_o); assert( sol_o.checkerboard ==Odd );
std::cout<<GridLogMessage << "SchurRedBlackStaggeredSolver inserted solution" <<std::endl;

// Verify the unprec residual
_Matrix.M(out,resid);
resid = resid-in;
RealD ns = norm2(in);
RealD nr = norm2(resid);

std::cout<<GridLogMessage << "SchurRedBlackStaggered solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl;
}
};

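The solver above follows the standard red-black Schur decomposition: eliminate the even sites, solve the Schur complement on the odd checkerboard, then reconstruct. As a sketch of the algebra behind the two logged steps:

\[
\hat M\,x_o = s_o - M_{oe} M_{ee}^{-1} s_e,
\qquad
x_e = M_{ee}^{-1}\bigl(s_e - M_{eo} x_o\bigr),
\qquad
\hat M = M_{oo} - M_{oe} M_{ee}^{-1} M_{eo},
\]

after which both checkerboards are scattered back into out and the full unpreconditioned residual \( \lVert M\,\mathrm{out}-\mathrm{in}\rVert/\lVert\mathrm{in}\rVert \) is verified.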
@@ -3,9 +3,12 @@

namespace Grid {

MemoryStats *MemoryProfiler::stats = nullptr;
bool MemoryProfiler::debug = false;

int PointerCache::victim;

PointerCache::PointerCacheEntry PointerCache::Entries[PointerCache::Ncache];

void *PointerCache::Insert(void *ptr,size_t bytes) {

@@ -94,4 +97,29 @@ void check_huge_pages(void *Buf,uint64_t BYTES)
#endif
}

std::string sizeString(const size_t bytes)
{
constexpr unsigned int bufSize = 256;
const char *suffixes[7] = {"", "K", "M", "G", "T", "P", "E"};
char buf[256];
size_t s = 0;
double count = bytes;

while (count >= 1024 && s < 7)
{
s++;
count /= 1024;
}
if (count - floor(count) == 0.0)
{
snprintf(buf, bufSize, "%d %sB", (int)count, suffixes[s]);
}
else
{
snprintf(buf, bufSize, "%.1f %sB", count, suffixes[s]);
}

return std::string(buf);
}

}

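Assumed sample values run through the function above, illustrating the integer-versus-decimal formatting branch:

  sizeString(4096);         // "4 KB"   (exact power: integer format)
  sizeString(1536);         // "1.5 KB" (otherwise one decimal place)
  sizeString(3221225472);   // "3 GB"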
@@ -63,6 +63,64 @@ namespace Grid {
static void *Lookup(size_t bytes) ;

};

std::string sizeString(size_t bytes);

struct MemoryStats
{
size_t totalAllocated{0}, maxAllocated{0},
currentlyAllocated{0}, totalFreed{0};
};

class MemoryProfiler
{
public:
static MemoryStats *stats;
static bool debug;
};

#define memString(bytes) std::to_string(bytes) + " (" + sizeString(bytes) + ")"
#define profilerDebugPrint \
if (MemoryProfiler::stats)\
{\
auto s = MemoryProfiler::stats;\
std::cout << GridLogDebug << "[Memory debug] Stats " << MemoryProfiler::stats << std::endl;\
std::cout << GridLogDebug << "[Memory debug] total : " << memString(s->totalAllocated) \
<< std::endl;\
std::cout << GridLogDebug << "[Memory debug] max : " << memString(s->maxAllocated) \
<< std::endl;\
std::cout << GridLogDebug << "[Memory debug] current: " << memString(s->currentlyAllocated) \
<< std::endl;\
std::cout << GridLogDebug << "[Memory debug] freed : " << memString(s->totalFreed) \
<< std::endl;\
}

#define profilerAllocate(bytes)\
if (MemoryProfiler::stats)\
{\
auto s = MemoryProfiler::stats;\
s->totalAllocated += (bytes);\
s->currentlyAllocated += (bytes);\
s->maxAllocated = std::max(s->maxAllocated, s->currentlyAllocated);\
}\
if (MemoryProfiler::debug)\
{\
std::cout << GridLogDebug << "[Memory debug] allocating " << memString(bytes) << std::endl;\
profilerDebugPrint;\
}

#define profilerFree(bytes)\
if (MemoryProfiler::stats)\
{\
auto s = MemoryProfiler::stats;\
s->totalFreed += (bytes);\
s->currentlyAllocated -= (bytes);\
}\
if (MemoryProfiler::debug)\
{\
std::cout << GridLogDebug << "[Memory debug] freeing " << memString(bytes) << std::endl;\
profilerDebugPrint;\
}

void check_huge_pages(void *Buf,uint64_t BYTES);

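Nothing is booked until a MemoryStats object is installed; a minimal sketch of switching the profiler on by hand (this mirrors what Grid_init does for --debug-mem later in this commit):

  static MemoryStats dbgMemStats;
  MemoryProfiler::stats = &dbgMemStats;  // accounting on
  MemoryProfiler::debug = true;          // verbose per-event logging
  // every profilerAllocate/profilerFree now updates dbgMemStats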
@@ -92,6 +150,7 @@ public:
pointer allocate(size_type __n, const void* _p= 0)
{
size_type bytes = __n*sizeof(_Tp);
profilerAllocate(bytes);

_Tp *ptr = (_Tp *) PointerCache::Lookup(bytes);
// if ( ptr != NULL )
@@ -122,6 +181,8 @@ public:
void deallocate(pointer __p, size_type __n) {
size_type bytes = __n * sizeof(_Tp);

profilerFree(bytes);

pointer __freeme = (pointer)PointerCache::Insert((void *)__p,bytes);

#ifdef HAVE_MM_MALLOC_H
@@ -172,10 +233,13 @@ public:
#ifdef GRID_COMMS_SHMEM
pointer allocate(size_type __n, const void* _p= 0)
{
size_type bytes = __n*sizeof(_Tp);

profilerAllocate(bytes);
#ifdef CRAY
_Tp *ptr = (_Tp *) shmem_align(__n*sizeof(_Tp),64);
_Tp *ptr = (_Tp *) shmem_align(bytes,64);
#else
_Tp *ptr = (_Tp *) shmem_align(64,__n*sizeof(_Tp));
_Tp *ptr = (_Tp *) shmem_align(64,bytes);
#endif
#ifdef PARANOID_SYMMETRIC_HEAP
static void * bcast;
@@ -193,18 +257,23 @@ public:
#endif
return ptr;
}
void deallocate(pointer __p, size_type) {
void deallocate(pointer __p, size_type __n) {
size_type bytes = __n*sizeof(_Tp);

profilerFree(bytes);
shmem_free((void *)__p);
}
#else
pointer allocate(size_type __n, const void* _p= 0)
{
#ifdef HAVE_MM_MALLOC_H
_Tp * ptr = (_Tp *) _mm_malloc(__n*sizeof(_Tp),GRID_ALLOC_ALIGN);
#else
_Tp * ptr = (_Tp *) memalign(GRID_ALLOC_ALIGN,__n*sizeof(_Tp));
#endif
size_type bytes = __n*sizeof(_Tp);

profilerAllocate(bytes);
#ifdef HAVE_MM_MALLOC_H
_Tp * ptr = (_Tp *) _mm_malloc(bytes, GRID_ALLOC_ALIGN);
#else
_Tp * ptr = (_Tp *) memalign(GRID_ALLOC_ALIGN, bytes);
#endif
uint8_t *cp = (uint8_t *)ptr;
if ( ptr ) {
// One touch per 4k page, static OMP loop to catch same loop order
@@ -215,7 +284,10 @@ public:
}
return ptr;
}
void deallocate(pointer __p, size_type) {
void deallocate(pointer __p, size_type __n) {
size_type bytes = __n*sizeof(_Tp);

profilerFree(bytes);
#ifdef HAVE_MM_MALLOC_H
_mm_free((void *)__p);
#else

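Note the deallocate overloads now receive the element count __n so profilerFree can book the same byte total that profilerAllocate recorded; standard containers already pass it. A hypothetical container use exercising both paths:

  // hypothetical: any container using the allocator is now fully accounted
  std::vector<double,Grid::alignedAllocator<double> > v(1024);  // profilerAllocate(8192)
  // ... destruction calls deallocate(p,1024) -> profilerFree(8192)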
@@ -134,8 +134,18 @@ void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent,int &srank)
{
_ndimension = processors.size();
assert(_ndimension = parent._ndimension);


int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension);
std::vector<int> parent_processor_coor(_ndimension,0);
std::vector<int> parent_processors (_ndimension,1);

// Can make 5d grid from 4d etc...
int pad = _ndimension-parent_ndimension;
for(int d=0;d<parent_ndimension;d++){
parent_processor_coor[pad+d]=parent._processor_coor[d];
parent_processors [pad+d]=parent._processors[d];
}

//////////////////////////////////////////////////////////////////////////////////////////////////////
// split the communicator
//////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -154,9 +164,9 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,
std::vector<int> ssize(_ndimension); // coor of split within parent

for(int d=0;d<_ndimension;d++){
ccoor[d] = parent._processor_coor[d] % processors[d];
scoor[d] = parent._processor_coor[d] / processors[d];
ssize[d] = parent._processors[d] / processors[d];
ccoor[d] = parent_processor_coor[d] % processors[d];
scoor[d] = parent_processor_coor[d] / processors[d];
ssize[d] = parent_processors[d] / processors[d];
}
int crank; // rank within subcomm ; srank is rank of subcomm within blocks of subcomms
// Mpi uses the reverse Lexico convention to us
@@ -166,38 +176,36 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,
MPI_Comm comm_split;
if ( Nchild > 1 ) {

/*
std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec<<std::endl;
std::cout << GridLogMessage<<" parent grid["<< parent._ndimension<<"] ";
for(int d=0;d<parent._processors.size();d++) std::cout << parent._processors[d] << " ";
std::cout<<std::endl;

std::cout << GridLogMessage<<" child grid["<< _ndimension <<"] ";
for(int d=0;d<processors.size();d++) std::cout << processors[d] << " ";
std::cout<<std::endl;

std::cout << GridLogMessage<<" old rank "<< parent._processor<<" coor ["<< _ndimension <<"] ";
for(int d=0;d<processors.size();d++) std::cout << parent._processor_coor[d] << " ";
std::cout<<std::endl;

std::cout << GridLogMessage<<" new rank "<< crank<<" coor ["<< _ndimension <<"] ";
for(int d=0;d<processors.size();d++) std::cout << ccoor[d] << " ";
std::cout<<std::endl;

std::cout << GridLogMessage<<" new coor ["<< _ndimension <<"] ";
for(int d=0;d<processors.size();d++) std::cout << parent._processor_coor[d] << " ";
std::cout<<std::endl;
*/
if(0){
std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec<<std::endl;
std::cout << GridLogMessage<<" parent grid["<< parent._ndimension<<"] ";
for(int d=0;d<parent._ndimension;d++) std::cout << parent._processors[d] << " ";
std::cout<<std::endl;

std::cout << GridLogMessage<<" child grid["<< _ndimension <<"] ";
for(int d=0;d<processors.size();d++) std::cout << processors[d] << " ";
std::cout<<std::endl;

std::cout << GridLogMessage<<" old rank "<< parent._processor<<" coor ["<< parent._ndimension <<"] ";
for(int d=0;d<parent._ndimension;d++) std::cout << parent._processor_coor[d] << " ";
std::cout<<std::endl;

std::cout << GridLogMessage<<" new split "<< srank<<" scoor ["<< _ndimension <<"] ";
for(int d=0;d<processors.size();d++) std::cout << scoor[d] << " ";
std::cout<<std::endl;

std::cout << GridLogMessage<<" new rank "<< crank<<" coor ["<< _ndimension <<"] ";
for(int d=0;d<processors.size();d++) std::cout << ccoor[d] << " ";
std::cout<<std::endl;
}

int ierr= MPI_Comm_split(parent.communicator,srank,crank,&comm_split);
assert(ierr==0);
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Declare victory
//////////////////////////////////////////////////////////////////////////////////////////////////////
/*
std::cout << GridLogMessage<<"Divided communicator "<< parent._Nprocessors<<" into "
<< Nchild <<" communicators with " << childsize << " ranks"<<std::endl;
*/
// std::cout << GridLogMessage<<"Divided communicator "<< parent._Nprocessors<<" into "
// << Nchild <<" communicators with " << childsize << " ranks"<<std::endl;
} else {
comm_split=parent.communicator;
srank = 0;
@@ -207,6 +215,17 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,
// Set up from the new split communicator
//////////////////////////////////////////////////////////////////////////////////////////////////////
InitFromMPICommunicator(processors,comm_split);

if(0){
std::cout << " ndim " <<_ndimension<<" " << parent._ndimension << std::endl;
for(int d=0;d<processors.size();d++){
std::cout << d<< " " << _processor_coor[d] <<" " << ccoor[d]<<std::endl;
}
}
for(int d=0;d<processors.size();d++){
assert(_processor_coor[d] == ccoor[d] );
}

}

//////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -231,7 +250,7 @@ void CartesianCommunicator::InitFromMPICommunicator(const std::vector<int> &proc
MPI_Comm_rank(communicator,&_processor);
MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);

if ( communicator_base != communicator_world ) {
if ( 0 && (communicator_base != communicator_world) ) {
std::cout << "InitFromMPICommunicator Cartesian communicator created with a non-world communicator"<<std::endl;

std::cout << " new communicator rank "<<_processor<< " coor ["<<_ndimension<<"] ";

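The mod/div pair above assigns every parent rank to a child communicator. A small worked example, assuming a 1-d parent of 8 ranks split into children of 2 ranks each:

  // parent coor     : 0 1 2 3 4 5 6 7
  // ccoor = coor % 2: 0 1 0 1 0 1 0 1   -> rank inside the child (key)
  // scoor = coor / 2: 0 0 1 1 2 2 3 3   -> which child communicator (colour)
  // ssize = 8 / 2   : 4 children in this dimension
  MPI_Comm_split(parent.communicator, /*colour=*/srank, /*key=*/crank, &comm_split);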
@@ -276,10 +276,11 @@ class CartesianCommunicator {
assert(in.size()==out.size());
uint64_t bytes=sizeof(T);
uint64_t words=in.size()/numnode;

// std:: cout << "AllToAll buffer size "<< in.size()*sizeof(T)<<std::endl;
// std:: cout << "AllToAll datum bytes "<< bytes<<std::endl;
// std:: cout << "AllToAll datum count "<< words<<std::endl;
assert(numnode * words == in.size());
assert(words < (1ULL<<32));

assert(words < (1ULL<<31));
AllToAll(dim,(void *)&in[0],(void *)&out[0],words,bytes);
}
void AllToAll(int dim ,void *in,void *out,uint64_t words,uint64_t bytes);

@@ -606,7 +606,7 @@ CartesianCommunicator::~CartesianCommunicator()
MPI_Finalized(&MPI_is_finalised);
if (communicator && !MPI_is_finalised) {
MPI_Comm_free(&communicator);
for(int i=0;i< communicator_halo.size();i++){
for(int i=0;i<communicator_halo.size();i++){
MPI_Comm_free(&communicator_halo[i]);
}
}

@@ -77,9 +77,6 @@ namespace Grid {


// merge of April 11 2017
//<<<<<<< HEAD


// this function is necessary for the LS vectorised field
inline int RNGfillable_general(GridBase *coarse,GridBase *fine)
{
@@ -91,7 +88,6 @@ namespace Grid {
// all further divisions are local
for(int d=0;d<lowerdims;d++) assert(fine->_processors[d]==1);
for(int d=0;d<rngdims;d++) assert(coarse->_processors[d] == fine->_processors[d+lowerdims]);


// then divide the number of local sites
// check that the total number of sims agree, means the iSites are the same
@@ -102,27 +98,6 @@ namespace Grid {

return fine->lSites() / coarse->lSites();
}

/*
// Wrap seed_seq to give common interface with random_device
class fixedSeed {
public:
typedef std::seed_seq::result_type result_type;
std::seed_seq src;

fixedSeed(const std::vector<int> &seeds) : src(seeds.begin(),seeds.end()) {};

result_type operator () (void){
std::vector<result_type> list(1);
src.generate(list.begin(),list.end());
return list[0];
}

};

=======
>>>>>>> develop
*/

// real scalars are one component
template<class scalar,class distribution,class generator>
@@ -171,7 +146,7 @@ namespace Grid {
// support for parallel init
///////////////////////
#ifdef RNG_FAST_DISCARD
static void Skip(RngEngine &eng)
static void Skip(RngEngine &eng,uint64_t site)
{
/////////////////////////////////////////////////////////////////////////////////////
// Skip by 2^40 elements between successive lattice sites
@@ -184,8 +159,11 @@ namespace Grid {
// and margin of safety is orders of magnitude.
// We could hack Sitmo to skip in the higher order words of state if necessary
/////////////////////////////////////////////////////////////////////////////////////
uint64_t skip = 0x1; skip = skip<<40;
// uint64_t skip = site+1; // Old init Skipped then drew. Checked compat with faster init
uint64_t skip = site;
skip = skip<<40;
eng.discard(skip);
// std::cout << " Engine " <<site << " state " <<eng<<std::endl;
}
#endif
static RngEngine Reseed(RngEngine &eng)
@@ -407,15 +385,14 @@ namespace Grid {
// MT implementation does not implement fast discard even though
// in principle this is possible
////////////////////////////////////////////////
std::vector<int> gcoor;
int rank,o_idx,i_idx;

// Everybody loops over global volume.
for(int gidx=0;gidx<_grid->_gsites;gidx++){

Skip(master_engine); // Skip to next RNG sequence
parallel_for(int gidx=0;gidx<_grid->_gsites;gidx++){

// Where is it?
int rank,o_idx,i_idx;
std::vector<int> gcoor;

_grid->GlobalIndexToGlobalCoor(gidx,gcoor);
_grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);

@@ -423,6 +400,7 @@ namespace Grid {
if( rank == _grid->ThisRank() ){
int l_idx=generator_idx(o_idx,i_idx);
_generators[l_idx] = master_engine;
Skip(_generators[l_idx],gidx); // Skip to next RNG sequence
}

}

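Moving Skip inside the rank-local branch means each rank fast-forwards only the engines it owns, and passing the site index makes the offset absolute, so the loop order no longer matters under parallel_for. A standalone sketch of the idea (assuming, as the comment above notes, a counter-based engine such as Sitmo whose discard is O(1)):

  RngEngine mine = master_engine;          // copy the seeded master state
  mine.discard(uint64_t(gidx) << 40);      // jump to this site's private 2^40-wide stream
  _generators[l_idx] = mine;               // hypothetical equivalent of the Skip call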
@@ -50,26 +50,22 @@ inline void subdivides(GridBase *coarse,GridBase *fine)
////////////////////////////////////////////////////////////////////////////////////////////
template<class vobj> inline void pickCheckerboard(int cb,Lattice<vobj> &half,const Lattice<vobj> &full){
half.checkerboard = cb;
int ssh=0;
//parallel_for
for(int ss=0;ss<full._grid->oSites();ss++){
std::vector<int> coor;

parallel_for(int ss=0;ss<full._grid->oSites();ss++){
int cbos;

std::vector<int> coor;
full._grid->oCoorFromOindex(coor,ss);
cbos=half._grid->CheckerBoard(coor);

if (cbos==cb) {
int ssh=half._grid->oIndex(coor);
half._odata[ssh] = full._odata[ss];
ssh++;
}
}
}
template<class vobj> inline void setCheckerboard(Lattice<vobj> &full,const Lattice<vobj> &half){
int cb = half.checkerboard;
int ssh=0;
//parallel_for
for(int ss=0;ss<full._grid->oSites();ss++){
parallel_for(int ss=0;ss<full._grid->oSites();ss++){
std::vector<int> coor;
int cbos;

@@ -77,8 +73,8 @@ inline void subdivides(GridBase *coarse,GridBase *fine)
cbos=half._grid->CheckerBoard(coor);

if (cbos==cb) {
int ssh=half._grid->oIndex(coor);
full._odata[ss]=half._odata[ssh];
ssh++;
}
}
}
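Deriving ssh directly from the coordinate via half._grid->oIndex(coor) is what makes the switch to parallel_for safe here: the old serial loops accumulated ssh with a counter, which would race across threads. (The ssh++ surviving after the direct lookup is now a harmless leftover.)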
@@ -698,30 +694,6 @@ void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){
////////////////////////////////////////////////////////////////////////////////
// Communicate between grids
////////////////////////////////////////////////////////////////////////////////
//
// All to all plan
//
// Subvolume on fine grid is v. Vectors a,b,c,d
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// SIMPLEST CASE:
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Mesh of nodes (2) ; subdivide to 1 subdivisions
//
// Lex ord:
// N0 va0 vb0 N1 va1 vb1
//
// For each dimension do an all to all
//
// full AllToAll(0)
// N0 va0 va1 N1 vb0 vb1
//
// REARRANGE
// N0 va01 N1 vb01
//
// Must also rearrange data to get into the NEW lex order of grid at each stage. Some kind of "insert/extract".
// NB: Easiest to programme if keep in lex order.
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// SIMPLE CASE:
///////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -755,9 +727,17 @@ void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){
//
// Must also rearrange data to get into the NEW lex order of grid at each stage. Some kind of "insert/extract".
// NB: Easiest to programme if keep in lex order.
//
/////////////////////////////////////////////////////////

/*
* Let chunk = (fvol*nvec)/sP be size of a chunk. ( Divide lexico vol * nvec into fP/sP = M chunks )
*
* 2nd A2A (over sP nodes; subdivide the fP into sP chunks of M)
*
* node 0 1st chunk of node 0M..(1M-1); 2nd chunk of node 0M..(1M-1).. data chunk x M x sP = fL / sP * M * sP = fL * M growth
* node 1 1st chunk of node 1M..(2M-1); 2nd chunk of node 1M..(2M-1)..
* node 2 1st chunk of node 2M..(3M-1); 2nd chunk of node 2M..(3M-1)..
* node 3 1st chunk of node 3M..(4M-1); 2nd chunk of node 3M..(4M-1)..
* etc...
*/
template<class Vobj>
void Grid_split(std::vector<Lattice<Vobj> > & full,Lattice<Vobj> & split)
{
@@ -816,57 +796,58 @@ void Grid_split(std::vector<Lattice<Vobj> > & full,Lattice<Vobj> & split)

int nvec = nvector; // Counts down to 1 as we collapse dims
std::vector<int> ldims = full_grid->_ldimensions;
std::vector<int> lcoor(ndim);

for(int d=ndim-1;d>=0;d--){

if ( ratio[d] != 1 ) {

full_grid ->AllToAll(d,alldata,tmpdata);
// std::cout << GridLogMessage << "Grid_split: dim " <<d<<" ratio "<<ratio[d]<<" nvec "<<nvec<<" procs "<<split_grid->_processors[d]<<std::endl;
// for(int v=0;v<nvec;v++){
// std::cout << "Grid_split: alldata["<<v<<"] " << alldata[v] <<std::endl;
// std::cout << "Grid_split: tmpdata["<<v<<"] " << tmpdata[v] <<std::endl;
// }
//////////////////////////////////////////
//Local volume for this dimension is expanded by ratio of processor extents
// Number of vectors is decreased by same factor
// Rearrange to lexico for bigger volume
//////////////////////////////////////////
nvec /= ratio[d];
if ( split_grid->_processors[d] > 1 ) {
alldata=tmpdata;
split_grid->AllToAll(d,alldata,tmpdata);
}

auto rdims = ldims; rdims[d] *= ratio[d];
auto rsites= lsites*ratio[d];
for(int v=0;v<nvec;v++){
auto rdims = ldims;
auto M = ratio[d];
auto rsites= lsites*M;// increases rsites by M
nvec /= M; // Reduce nvec by subdivision factor
rdims[d] *= M; // increase local dim by same factor

// For loop over each site within old subvol
for(int lsite=0;lsite<lsites;lsite++){
int sP = split_grid->_processors[d];
int fP = full_grid->_processors[d];

Lexicographic::CoorFromIndex(lcoor, lsite, ldims);
int fvol = lsites;

int chunk = (nvec*fvol)/sP; assert(chunk*sP == nvec*fvol);

for(int r=0;r<ratio[d];r++){ // ratio*nvec terms
// Loop over reordered data post A2A
parallel_for(int c=0;c<chunk;c++){
std::vector<int> coor(ndim);
for(int m=0;m<M;m++){
for(int s=0;s<sP;s++){

// addressing; use lexico
int lex_r;
uint64_t lex_c = c+chunk*m+chunk*M*s;
uint64_t lex_fvol_vec = c+chunk*s;
uint64_t lex_fvol = lex_fvol_vec%fvol;
uint64_t lex_vec = lex_fvol_vec/fvol;

auto rcoor = lcoor; rcoor[d] += r*ldims[d];
// which node sets an adder to the coordinate
Lexicographic::CoorFromIndex(coor, lex_fvol, ldims);
coor[d] += m*ldims[d];
Lexicographic::IndexFromCoor(coor, lex_r, rdims);
lex_r += lex_vec * rsites;

int rsite; Lexicographic::IndexFromCoor(rcoor, rsite, rdims);
rsite += v * rsites;
// LexicoFind coordinate & vector number within split lattice
alldata[lex_r] = tmpdata[lex_c];

int rmul=nvec*lsites;
int vmul= lsites;
alldata[rsite] = tmpdata[lsite+r*rmul+v*vmul];
// if ( lsite==0 ) {
// std::cout << "Grid_split: grow alldata["<<rsite<<"] " << alldata[rsite] << " <- tmpdata["<< lsite+r*rmul+v*vmul<<"] "<<tmpdata[lsite+r*rmul+v*vmul] <<std::endl;
// }
}
}
}
ldims[d]*= ratio[d];
lsites *= ratio[d];

if ( split_grid->_processors[d] > 1 ) {
tmpdata = alldata;
split_grid->AllToAll(d,tmpdata,alldata);
}
}
}
vectorizeFromLexOrdArray(alldata,split);
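A small worked example of the chunk bookkeeping in the loop above (sizes assumed): with fP = 4 and sP = 2 the split factor is M = 2; taking nvec = 4 and fvol = 8 gives chunk = (nvec*fvol)/sP = 16, and each incoming A2A element is located by

  uint64_t lex_c        = c + chunk*m + chunk*M*s;  // element c of chunk m from split-rank s
  uint64_t lex_fvol_vec = c + chunk*s;              // lexico position in (site,vector) order
  uint64_t lex_fvol     = lex_fvol_vec % fvol;      // -> which local site
  uint64_t lex_vec      = lex_fvol_vec / fvol;      // -> which of the nvec vectors

so the mod/div pair recovers site and vector before the element is re-indexed into the enlarged local volume rdims.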
@@ -937,72 +918,74 @@ void Grid_unsplit(std::vector<Lattice<Vobj> > & full,Lattice<Vobj> & split)
/////////////////////////////////////////////////////////////////
// Start from split grid and work towards full grid
/////////////////////////////////////////////////////////////////
std::vector<int> lcoor(ndim);
std::vector<int> rcoor(ndim);

int nvec = 1;
lsites = split_grid->lSites();
std::vector<int> ldims = split_grid->_ldimensions;
uint64_t rsites = split_grid->lSites();
std::vector<int> rdims = split_grid->_ldimensions;

// for(int d=ndim-1;d>=0;d--){
for(int d=0;d<ndim;d++){

if ( ratio[d] != 1 ) {

auto M = ratio[d];

if ( split_grid->_processors[d] > 1 ) {
tmpdata = alldata;
split_grid->AllToAll(d,tmpdata,alldata);
}

//////////////////////////////////////////
//Local volume for this dimension is expanded by ratio of processor extents
// Number of vectors is decreased by same factor
// Rearrange to lexico for bigger volume
//////////////////////////////////////////
auto rsites= lsites/ratio[d];
auto rdims = ldims; rdims[d]/=ratio[d];

for(int v=0;v<nvec;v++){

// rsite, rcoor --> smaller local volume
// lsite, lcoor --> bigger original (single node?) volume
// For loop over each site within smaller subvol
for(int rsite=0;rsite<rsites;rsite++){

Lexicographic::CoorFromIndex(rcoor, rsite, rdims);
int lsite;

for(int r=0;r<ratio[d];r++){

lcoor = rcoor; lcoor[d] += r*rdims[d];
Lexicographic::IndexFromCoor(lcoor, lsite, ldims); lsite += v * lsites;

int rmul=nvec*rsites;
int vmul= rsites;
tmpdata[rsite+r*rmul+v*vmul]=alldata[lsite];
int sP = split_grid->_processors[d];
int fP = full_grid->_processors[d];

auto ldims = rdims; ldims[d] /= M; // Decrease local dims by same factor
auto lsites= rsites/M; // Decreases rsites by M

int fvol = lsites;
int chunk = (nvec*fvol)/sP; assert(chunk*sP == nvec*fvol);

{
// Loop over reordered data post A2A
parallel_for(int c=0;c<chunk;c++){
std::vector<int> coor(ndim);
for(int m=0;m<M;m++){
for(int s=0;s<sP;s++){

// addressing; use lexico
int lex_r;
uint64_t lex_c = c+chunk*m+chunk*M*s;
uint64_t lex_fvol_vec = c+chunk*s;
uint64_t lex_fvol = lex_fvol_vec%fvol;
uint64_t lex_vec = lex_fvol_vec/fvol;

// which node sets an adder to the coordinate
Lexicographic::CoorFromIndex(coor, lex_fvol, ldims);
coor[d] += m*ldims[d];
Lexicographic::IndexFromCoor(coor, lex_r, rdims);
lex_r += lex_vec * rsites;

// LexicoFind coordinate & vector number within split lattice
tmpdata[lex_c] = alldata[lex_r];
}
}
}
}
nvec *= ratio[d];
ldims[d]=rdims[d];
lsites =rsites;

if ( split_grid->_processors[d] > 1 ) {
split_grid->AllToAll(d,tmpdata,alldata);
tmpdata=alldata;
}
full_grid ->AllToAll(d,tmpdata,alldata);
rdims[d]/= M;
rsites /= M;
nvec *= M; // Increase nvec by subdivision factor
}
}

lsites = full_grid->lSites();
for(int v=0;v<nvector;v++){
assert(v<full.size());
// assert(v<full.size());
parallel_for(int site=0;site<lsites;site++){
// assert(v*lsites+site < alldata.size());
scalardata[site] = alldata[v*lsites+site];
}
vectorizeFromLexOrdArray(scalardata,full[v]);
}
}


}
#endif

@@ -492,6 +492,14 @@ namespace QCD {
return traceIndex<ColourIndex>(lhs);
}

//////////////////////////////////////////
// Current types
//////////////////////////////////////////
GRID_SERIALIZABLE_ENUM(Current, undef,
Vector, 0,
Axial, 1,
Tadpole, 2);

} //namespace QCD
} // Grid


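GRID_SERIALIZABLE_ENUM generates an enum class plus the string conversions the serialisers use; an assumed round-trip sketch:

  Current curr = Current::Axial;
  write(WR, "curr_type", curr);   // stored as the string "Axial"
  read (RD, "curr_type", curr);   // unrecognised strings fall back to Current::undef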
@@ -47,6 +47,7 @@ namespace Grid {
INHERIT_IMPL_TYPES(Impl);

FermionOperator(const ImplParams &p= ImplParams()) : Impl(p) {};
virtual ~FermionOperator(void) = default;

virtual FermionField &tmp(void) = 0;

@@ -112,6 +113,21 @@ namespace Grid {
///////////////////////////////////////////////
virtual void ImportGauge(const GaugeField & _U)=0;

//////////////////////////////////////////////////////////////////////
// Conserved currents, either contract at sink or insert sequentially.
//////////////////////////////////////////////////////////////////////
virtual void ContractConservedCurrent(PropagatorField &q_in_1,
PropagatorField &q_in_2,
PropagatorField &q_out,
Current curr_type,
unsigned int mu)=0;
virtual void SeqConservedCurrent(PropagatorField &q_in,
PropagatorField &q_out,
Current curr_type,
unsigned int mu,
std::vector<Real> mom,
unsigned int tmin,
unsigned int tmax)=0;
};

}

@@ -212,6 +212,13 @@ namespace QCD {
StencilImpl &St) {
mult(&phi(), &U(mu), &chi());
}

inline void multLinkProp(SitePropagator &phi,
const SiteDoubledGaugeField &U,
const SitePropagator &chi,
int mu) {
mult(&phi(), &U(mu), &chi());
}

template <class ref>
inline void loadLinkElement(Simd &reg, ref &memory) {
@@ -340,7 +347,20 @@ class DomainWallVec5dImpl : public PeriodicGaugeImpl< GaugeImplTypes< S,Nrepres
}
mult(&phi(), &UU(), &chi());
}


inline void multLinkProp(SitePropagator &phi,
const SiteDoubledGaugeField &U,
const SitePropagator &chi,
int mu) {
SiteGaugeLink UU;
for (int i = 0; i < Nrepresentation; i++) {
for (int j = 0; j < Nrepresentation; j++) {
vsplat(UU()()(i, j), U(mu)()(i, j));
}
}
mult(&phi(), &UU(), &chi());
}

inline void DoubleStore(GridBase *GaugeGrid, DoubledGaugeField &Uds,const GaugeField &Umu)
{
SiteScalarGaugeField ScalarUmu;
@@ -537,7 +557,12 @@ class GparityWilsonImpl : public ConjugateGaugeImpl<GaugeImplTypes<S, Nrepresent
}

}

// Fixme: Gparity prop * link
inline void multLinkProp(SitePropagator &phi, const SiteDoubledGaugeField &U,
const SitePropagator &chi, int mu)
{
assert(0);
}

template <class ref>
inline void loadLinkElement(Simd &reg, ref &memory) {

@@ -393,6 +393,31 @@ void ImprovedStaggeredFermion<Impl>::DhopInternal(StencilImpl &st, LebesgueOrder
}
};

////////////////////////////////////////////////////////
// Conserved current - not yet implemented.
////////////////////////////////////////////////////////
template <class Impl>
void ImprovedStaggeredFermion<Impl>::ContractConservedCurrent(PropagatorField &q_in_1,
PropagatorField &q_in_2,
PropagatorField &q_out,
Current curr_type,
unsigned int mu)
{
assert(0);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::SeqConservedCurrent(PropagatorField &q_in,
PropagatorField &q_out,
Current curr_type,
unsigned int mu,
std::vector<Real> mom,
unsigned int tmin,
unsigned int tmax)
{
assert(0);
}

FermOpStaggeredTemplateInstantiate(ImprovedStaggeredFermion);

//AdjointFermOpTemplateInstantiate(ImprovedStaggeredFermion);

@@ -157,6 +157,22 @@ class ImprovedStaggeredFermion : public StaggeredKernels<Impl>, public ImprovedS

LebesgueOrder Lebesgue;
LebesgueOrder LebesgueEvenOdd;

///////////////////////////////////////////////////////////////
// Conserved current utilities
///////////////////////////////////////////////////////////////
void ContractConservedCurrent(PropagatorField &q_in_1,
PropagatorField &q_in_2,
PropagatorField &q_out,
Current curr_type,
unsigned int mu);
void SeqConservedCurrent(PropagatorField &q_in,
PropagatorField &q_out,
Current curr_type,
unsigned int mu,
std::vector<Real> mom,
unsigned int tmin,
unsigned int tmax);
};

typedef ImprovedStaggeredFermion<StaggeredImplF> ImprovedStaggeredFermionF;

@@ -405,6 +405,30 @@ void ImprovedStaggeredFermion5D<Impl>::MooeeInvDag(const FermionField &in,
MooeeInv(in, out);
}

////////////////////////////////////////////////////////
// Conserved current - not yet implemented.
////////////////////////////////////////////////////////
template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::ContractConservedCurrent(PropagatorField &q_in_1,
PropagatorField &q_in_2,
PropagatorField &q_out,
Current curr_type,
unsigned int mu)
{
assert(0);
}

template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
PropagatorField &q_out,
Current curr_type,
unsigned int mu,
std::vector<Real> mom,
unsigned int tmin,
unsigned int tmax)
{
assert(0);
}

FermOpStaggeredTemplateInstantiate(ImprovedStaggeredFermion5D);
FermOpStaggeredVec5dTemplateInstantiate(ImprovedStaggeredFermion5D);

@@ -170,6 +170,21 @@ namespace QCD {
// Comms buffer
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > comm_buf;

///////////////////////////////////////////////////////////////
// Conserved current utilities
///////////////////////////////////////////////////////////////
void ContractConservedCurrent(PropagatorField &q_in_1,
PropagatorField &q_in_2,
PropagatorField &q_out,
Current curr_type,
unsigned int mu);
void SeqConservedCurrent(PropagatorField &q_in,
PropagatorField &q_out,
Current curr_type,
unsigned int mu,
std::vector<Real> mom,
unsigned int tmin,
unsigned int tmax);
};

}}

@@ -345,6 +345,112 @@ void WilsonFermion<Impl>::DhopInternal(StencilImpl &st, LebesgueOrder &lo,
}
};

/*******************************************************************************
* Conserved current utilities for Wilson fermions, for contracting propagators
* to make a conserved current sink or inserting the conserved current
* sequentially.
******************************************************************************/
template <class Impl>
void WilsonFermion<Impl>::ContractConservedCurrent(PropagatorField &q_in_1,
PropagatorField &q_in_2,
PropagatorField &q_out,
Current curr_type,
unsigned int mu)
{
Gamma g5(Gamma::Algebra::Gamma5);
conformable(_grid, q_in_1._grid);
conformable(_grid, q_in_2._grid);
conformable(_grid, q_out._grid);
PropagatorField tmp1(_grid), tmp2(_grid);
q_out = zero;

// Forward, need q1(x + mu), q2(x). Backward, need q1(x), q2(x + mu).
// Inefficient comms method but not performance critical.
tmp1 = Cshift(q_in_1, mu, 1);
tmp2 = Cshift(q_in_2, mu, 1);
parallel_for (unsigned int sU = 0; sU < Umu._grid->oSites(); ++sU)
{
Kernels::ContractConservedCurrentSiteFwd(tmp1._odata[sU],
q_in_2._odata[sU],
q_out._odata[sU],
Umu, sU, mu);
Kernels::ContractConservedCurrentSiteBwd(q_in_1._odata[sU],
tmp2._odata[sU],
q_out._odata[sU],
Umu, sU, mu);
}
}

template <class Impl>
void WilsonFermion<Impl>::SeqConservedCurrent(PropagatorField &q_in,
PropagatorField &q_out,
Current curr_type,
unsigned int mu,
std::vector<Real> mom,
unsigned int tmin,
unsigned int tmax)
{
conformable(_grid, q_in._grid);
conformable(_grid, q_out._grid);
Lattice<iSinglet<Simd>> ph(_grid), coor(_grid);
Complex i(0.0,1.0);
PropagatorField tmpFwd(_grid), tmpBwd(_grid), tmp(_grid);
unsigned int tshift = (mu == Tp) ? 1 : 0;
unsigned int LLt = GridDefaultLatt()[Tp];

// Momentum projection
ph = zero;
for(unsigned int mu = 0; mu < Nd - 1; mu++)
{
LatticeCoordinate(coor, mu);
ph = ph + mom[mu]*coor*((1./(_grid->_fdimensions[mu])));
}
ph = exp((Real)(2*M_PI)*i*ph);

q_out = zero;
LatticeInteger coords(_grid);
LatticeCoordinate(coords, Tp);

// Need q(x + mu) and q(x - mu).
tmp = Cshift(q_in, mu, 1);
tmpFwd = tmp*ph;
tmp = ph*q_in;
tmpBwd = Cshift(tmp, mu, -1);

parallel_for (unsigned int sU = 0; sU < Umu._grid->oSites(); ++sU)
{
// Compute the sequential conserved current insertion only if our simd
// object contains a timeslice we need.
vInteger t_mask = ((coords._odata[sU] >= tmin) &&
(coords._odata[sU] <= tmax));
Integer timeSlices = Reduce(t_mask);

if (timeSlices > 0)
{
Kernels::SeqConservedCurrentSiteFwd(tmpFwd._odata[sU],
q_out._odata[sU],
Umu, sU, mu, t_mask);
}

// Repeat for backward direction.
t_mask = ((coords._odata[sU] >= (tmin + tshift)) &&
(coords._odata[sU] <= (tmax + tshift)));

//if tmax = LLt-1 (last timeslice) include timeslice 0 if the time is shifted (mu=3)
unsigned int t0 = 0;
if((tmax==LLt-1) && (tshift==1)) t_mask = (t_mask || (coords._odata[sU] == t0 ));

timeSlices = Reduce(t_mask);

if (timeSlices > 0)
{
Kernels::SeqConservedCurrentSiteBwd(tmpBwd._odata[sU],
q_out._odata[sU],
Umu, sU, mu, t_mask);
}
}
}

FermOpTemplateInstantiate(WilsonFermion);
AdjointFermOpTemplateInstantiate(WilsonFermion);
TwoIndexFermOpTemplateInstantiate(WilsonFermion);

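The momentum phase built in SeqConservedCurrent above implements, as a sketch of the loop's formula,

\[
\mathrm{ph}(x)=\exp\Bigl(2\pi i\sum_{\nu=0}^{N_d-2}\frac{p_\nu x_\nu}{L_\nu}\Bigr),
\]

restricted afterwards to the [tmin, tmax] window through the SIMD mask t_mask. Note the 4-d version reuses the name mu for the spatial loop index, shadowing the direction argument; the 5-d version further below uses nu.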
@@ -146,6 +146,22 @@ class WilsonFermion : public WilsonKernels<Impl>, public WilsonFermionStatic {

LebesgueOrder Lebesgue;
LebesgueOrder LebesgueEvenOdd;

///////////////////////////////////////////////////////////////
// Conserved current utilities
///////////////////////////////////////////////////////////////
void ContractConservedCurrent(PropagatorField &q_in_1,
PropagatorField &q_in_2,
PropagatorField &q_out,
Current curr_type,
unsigned int mu);
void SeqConservedCurrent(PropagatorField &q_in,
PropagatorField &q_out,
Current curr_type,
unsigned int mu,
std::vector<Real> mom,
unsigned int tmin,
unsigned int tmax);
};

typedef WilsonFermion<WilsonImplF> WilsonFermionF;

@@ -12,6 +12,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Guido Cossu <guido.cossu@ed.ac.uk>
Author: Andrew Lawson <andrew.lawson1991@gmail.com>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -702,6 +703,168 @@ void WilsonFermion5D<Impl>::MomentumSpacePropagatorHw(FermionField &out,const Fe

}

/*******************************************************************************
* Conserved current utilities for Wilson fermions, for contracting propagators
* to make a conserved current sink or inserting the conserved current
* sequentially.
******************************************************************************/

// Helper macro to reverse Simd vector. Fixme: slow, generic implementation.
#define REVERSE_LS(qSite, qSiteRev, Nsimd) \
{ \
std::vector<typename SitePropagator::scalar_object> qSiteVec(Nsimd); \
extract(qSite, qSiteVec); \
for (int i = 0; i < Nsimd / 2; ++i) \
{ \
typename SitePropagator::scalar_object tmp = qSiteVec[i]; \
qSiteVec[i] = qSiteVec[Nsimd - i - 1]; \
qSiteVec[Nsimd - i - 1] = tmp; \
} \
merge(qSiteRev, qSiteVec); \
}

template <class Impl>
void WilsonFermion5D<Impl>::ContractConservedCurrent(PropagatorField &q_in_1,
PropagatorField &q_in_2,
PropagatorField &q_out,
Current curr_type,
unsigned int mu)
{
conformable(q_in_1._grid, FermionGrid());
conformable(q_in_1._grid, q_in_2._grid);
conformable(_FourDimGrid, q_out._grid);
PropagatorField tmp1(FermionGrid()), tmp2(FermionGrid());
unsigned int LLs = q_in_1._grid->_rdimensions[0];
q_out = zero;

// Forward, need q1(x + mu, s), q2(x, Ls - 1 - s). Backward, need q1(x, s),
// q2(x + mu, Ls - 1 - s). 5D lattice so shift 4D coordinate mu by one.
tmp1 = Cshift(q_in_1, mu + 1, 1);
tmp2 = Cshift(q_in_2, mu + 1, 1);
parallel_for (unsigned int sU = 0; sU < Umu._grid->oSites(); ++sU)
{
unsigned int sF1 = sU * LLs;
unsigned int sF2 = (sU + 1) * LLs - 1;

for (unsigned int s = 0; s < LLs; ++s)
{
bool axial_sign = ((curr_type == Current::Axial) && \
(s < (LLs / 2)));
SitePropagator qSite2, qmuSite2;

// If vectorised in 5th dimension, reverse q2 vector to match up
// sites correctly.
if (Impl::LsVectorised)
{
REVERSE_LS(q_in_2._odata[sF2], qSite2, Ls / LLs);
REVERSE_LS(tmp2._odata[sF2], qmuSite2, Ls / LLs);
}
else
{
qSite2 = q_in_2._odata[sF2];
qmuSite2 = tmp2._odata[sF2];
}
Kernels::ContractConservedCurrentSiteFwd(tmp1._odata[sF1],
qSite2,
q_out._odata[sU],
Umu, sU, mu, axial_sign);
Kernels::ContractConservedCurrentSiteBwd(q_in_1._odata[sF1],
qmuSite2,
q_out._odata[sU],
Umu, sU, mu, axial_sign);
sF1++;
sF2--;
}
}
}


template <class Impl>
void WilsonFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
PropagatorField &q_out,
Current curr_type,
unsigned int mu,
std::vector<Real> mom,
unsigned int tmin,
unsigned int tmax)
{
conformable(q_in._grid, FermionGrid());
conformable(q_in._grid, q_out._grid);
Lattice<iSinglet<Simd>> ph(FermionGrid()), coor(FermionGrid());
PropagatorField tmpFwd(FermionGrid()), tmpBwd(FermionGrid()),
tmp(FermionGrid());
Complex i(0.0, 1.0);
unsigned int tshift = (mu == Tp) ? 1 : 0;
unsigned int LLs = q_in._grid->_rdimensions[0];
unsigned int LLt = GridDefaultLatt()[Tp];

// Momentum projection.
ph = zero;
for(unsigned int nu = 0; nu < Nd - 1; nu++)
{
// Shift coordinate lattice index by 1 to account for 5th dimension.
LatticeCoordinate(coor, nu + 1);
ph = ph + mom[nu]*coor*((1./(_FourDimGrid->_fdimensions[nu])));
}
ph = exp((Real)(2*M_PI)*i*ph);

q_out = zero;
LatticeInteger coords(_FourDimGrid);
LatticeCoordinate(coords, Tp);

// Need q(x + mu, s) and q(x - mu, s). 5D lattice so shift 4D coordinate mu
// by one.
tmp = Cshift(q_in, mu + 1, 1);
tmpFwd = tmp*ph;
tmp = ph*q_in;
tmpBwd = Cshift(tmp, mu + 1, -1);

parallel_for (unsigned int sU = 0; sU < Umu._grid->oSites(); ++sU)
{
// Compute the sequential conserved current insertion only if our simd
// object contains a timeslice we need.
vInteger t_mask = ((coords._odata[sU] >= tmin) &&
(coords._odata[sU] <= tmax));
Integer timeSlices = Reduce(t_mask);

if (timeSlices > 0)
{
unsigned int sF = sU * LLs;
for (unsigned int s = 0; s < LLs; ++s)
{
bool axial_sign = ((curr_type == Current::Axial) && (s < (LLs / 2)));
Kernels::SeqConservedCurrentSiteFwd(tmpFwd._odata[sF],
q_out._odata[sF], Umu, sU,
mu, t_mask, axial_sign);
++sF;
}
}

// Repeat for backward direction.
t_mask = ((coords._odata[sU] >= (tmin + tshift)) &&
(coords._odata[sU] <= (tmax + tshift)));

//if tmax = LLt-1 (last timeslice) include timeslice 0 if the time is shifted (mu=3)
unsigned int t0 = 0;
if((tmax==LLt-1) && (tshift==1)) t_mask = (t_mask || (coords._odata[sU] == t0 ));

timeSlices = Reduce(t_mask);

if (timeSlices > 0)
{
unsigned int sF = sU * LLs;
for (unsigned int s = 0; s < LLs; ++s)
{
bool axial_sign = ((curr_type == Current::Axial) && (s < (LLs / 2)));
Kernels::SeqConservedCurrentSiteBwd(tmpBwd._odata[sF],
q_out._odata[sF], Umu, sU,
mu, t_mask, axial_sign);
++sF;
}
}
}
}

FermOpTemplateInstantiate(WilsonFermion5D);
GparityFermOpTemplateInstantiate(WilsonFermion5D);


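When the fifth dimension is spread across SIMD lanes (Impl::LsVectorised), pairing s with Ls − 1 − s has to happen across lanes, which is what REVERSE_LS does by unpacking to scalars. The macro's manual swap loop is equivalent to this sketch (extract/merge as used in the macro itself):

  std::vector<typename SitePropagator::scalar_object> lanes(Nsimd);
  extract(qSite, lanes);                     // one scalar object per SIMD lane
  std::reverse(lanes.begin(), lanes.end());  // lane i <-> lane Nsimd-1-i
  merge(qSiteRev, lanes);                    // repack into a SIMD site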
@@ -214,6 +214,21 @@ namespace QCD {
// Comms buffer
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > comm_buf;

///////////////////////////////////////////////////////////////
// Conserved current utilities
///////////////////////////////////////////////////////////////
void ContractConservedCurrent(PropagatorField &q_in_1,
PropagatorField &q_in_2,
PropagatorField &q_out,
Current curr_type,
unsigned int mu);
void SeqConservedCurrent(PropagatorField &q_in,
PropagatorField &q_out,
Current curr_type,
unsigned int mu,
std::vector<Real> mom,
unsigned int tmin,
unsigned int tmax);
};

}}

@@ -281,6 +281,172 @@ void WilsonKernels<Impl>::DhopDir( StencilImpl &st, DoubledGaugeField &U,SiteHal
vstream(out._odata[sF], result);
}

/*******************************************************************************
* Conserved current utilities for Wilson fermions, for contracting propagators
* to make a conserved current sink or inserting the conserved current
* sequentially. Common to both 4D and 5D.
******************************************************************************/
// N.B. Functions below assume a -1/2 factor within U.
#define WilsonCurrentFwd(expr, mu) ((expr - Gamma::gmu[mu]*expr))
#define WilsonCurrentBwd(expr, mu) ((expr + Gamma::gmu[mu]*expr))

/*******************************************************************************
* Name: ContractConservedCurrentSiteFwd
* Operation: (1/2) * q2[x] * U(x) * (g[mu] - 1) * q1[x + mu]
* Notes: - DoubledGaugeField U assumed to contain -1/2 factor.
* - Pass in q_in_1 shifted in +ve mu direction.
******************************************************************************/
template<class Impl>
void WilsonKernels<Impl>::ContractConservedCurrentSiteFwd(
const SitePropagator &q_in_1,
const SitePropagator &q_in_2,
SitePropagator &q_out,
DoubledGaugeField &U,
unsigned int sU,
unsigned int mu,
bool switch_sign)
{
SitePropagator result, tmp;
Gamma g5(Gamma::Algebra::Gamma5);
Impl::multLinkProp(tmp, U._odata[sU], q_in_1, mu);
result = g5 * adj(q_in_2) * g5 * WilsonCurrentFwd(tmp, mu);
if (switch_sign)
{
q_out -= result;
}
else
{
q_out += result;
}
}

/*******************************************************************************
* Name: ContractConservedCurrentSiteBwd
* Operation: (1/2) * q2[x + mu] * U^dag(x) * (g[mu] + 1) * q1[x]
* Notes: - DoubledGaugeField U assumed to contain -1/2 factor.
* - Pass in q_in_2 shifted in +ve mu direction.
******************************************************************************/
template<class Impl>
void WilsonKernels<Impl>::ContractConservedCurrentSiteBwd(
const SitePropagator &q_in_1,
const SitePropagator &q_in_2,
SitePropagator &q_out,
DoubledGaugeField &U,
unsigned int sU,
unsigned int mu,
bool switch_sign)
{
SitePropagator result, tmp;
Gamma g5(Gamma::Algebra::Gamma5);
Impl::multLinkProp(tmp, U._odata[sU], q_in_1, mu + Nd);
result = g5 * adj(q_in_2) * g5 * WilsonCurrentBwd(tmp, mu);
if (switch_sign)
{
q_out += result;
}
else
{
q_out -= result;
}
}

// G-parity requires more specialised implementation.
#define NO_CURR_SITE(Impl) \
template <> \
void WilsonKernels<Impl>::ContractConservedCurrentSiteFwd( \
const SitePropagator &q_in_1, \
const SitePropagator &q_in_2, \
SitePropagator &q_out, \
DoubledGaugeField &U, \
unsigned int sU, \
unsigned int mu, \
bool switch_sign) \
{ \
assert(0); \
} \
template <> \
void WilsonKernels<Impl>::ContractConservedCurrentSiteBwd( \
const SitePropagator &q_in_1, \
const SitePropagator &q_in_2, \
SitePropagator &q_out, \
DoubledGaugeField &U, \
unsigned int mu, \
unsigned int sU, \
bool switch_sign) \
{ \
assert(0); \
}

NO_CURR_SITE(GparityWilsonImplF);
NO_CURR_SITE(GparityWilsonImplD);
NO_CURR_SITE(GparityWilsonImplFH);
NO_CURR_SITE(GparityWilsonImplDF);


/*******************************************************************************
* Name: SeqConservedCurrentSiteFwd
* Operation: (1/2) * U(x) * (g[mu] - 1) * q[x + mu]
* Notes: - DoubledGaugeField U assumed to contain -1/2 factor.
* - Pass in q_in shifted in +ve mu direction.
******************************************************************************/
template<class Impl>
void WilsonKernels<Impl>::SeqConservedCurrentSiteFwd(const SitePropagator &q_in,
SitePropagator &q_out,
DoubledGaugeField &U,
unsigned int sU,
unsigned int mu,
vInteger t_mask,
bool switch_sign)
{
SitePropagator result;
Impl::multLinkProp(result, U._odata[sU], q_in, mu);
result = WilsonCurrentFwd(result, mu);

// Zero any unwanted timeslice entries.
result = predicatedWhere(t_mask, result, 0.*result);

if (switch_sign)
{
q_out -= result;
}
else
{
q_out += result;
}
}

/*******************************************************************************
* Name: SeqConservedCurrentSiteBwd
* Operation: (1/2) * U^dag(x) * (g[mu] + 1) * q[x - mu]
* Notes: - DoubledGaugeField U assumed to contain -1/2 factor.
* - Pass in q_in shifted in -ve mu direction.
******************************************************************************/
template<class Impl>
void WilsonKernels<Impl>::SeqConservedCurrentSiteBwd(const SitePropagator &q_in,
SitePropagator &q_out,
DoubledGaugeField &U,
unsigned int sU,
unsigned int mu,
vInteger t_mask,
bool switch_sign)
{
SitePropagator result;
Impl::multLinkProp(result, U._odata[sU], q_in, mu + Nd);
result = WilsonCurrentBwd(result, mu);

// Zero any unwanted timeslice entries.
result = predicatedWhere(t_mask, result, 0.*result);

if (switch_sign)
{
q_out += result;
}
else
{
q_out -= result;
}
}

FermOpTemplateInstantiate(WilsonKernels);
AdjointFermOpTemplateInstantiate(WilsonKernels);
TwoIndexFermOpTemplateInstantiate(WilsonKernels);

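Combining the Fwd and Bwd site operations (with the −1/2 absorbed into U, as the note above states), the contraction realises the point-split conserved current; schematically, per the Operation comments:

\[
J_\mu(x) = \tfrac{1}{2}\Bigl[\,\bar q_2(x)\,U_\mu(x)\,(\gamma_\mu - 1)\,q_1(x+\hat\mu)
\;+\; \bar q_2(x+\hat\mu)\,U_\mu^\dagger(x)\,(\gamma_\mu + 1)\,q_1(x)\Bigr],
\]

with the overall and relative signs supplied by the += / −= bodies (flipped for the axial current via switch_sign) and \(\bar q_2\) obtained from the propagator via γ5-hermiticity (g5 * adj(q) * g5).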
@@ -180,6 +180,38 @@ public:
void DhopDir(StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor * buf,
int sF, int sU, const FermionField &in, FermionField &out, int dirdisp, int gamma);

//////////////////////////////////////////////////////////////////////////////
// Utilities for inserting Wilson conserved current.
//////////////////////////////////////////////////////////////////////////////
void ContractConservedCurrentSiteFwd(const SitePropagator &q_in_1,
const SitePropagator &q_in_2,
SitePropagator &q_out,
DoubledGaugeField &U,
unsigned int sU,
unsigned int mu,
bool switch_sign = false);
void ContractConservedCurrentSiteBwd(const SitePropagator &q_in_1,
const SitePropagator &q_in_2,
SitePropagator &q_out,
DoubledGaugeField &U,
unsigned int sU,
unsigned int mu,
bool switch_sign = false);
void SeqConservedCurrentSiteFwd(const SitePropagator &q_in,
SitePropagator &q_out,
DoubledGaugeField &U,
unsigned int sU,
unsigned int mu,
vInteger t_mask,
bool switch_sign = false);
void SeqConservedCurrentSiteBwd(const SitePropagator &q_in,
SitePropagator &q_out,
DoubledGaugeField &U,
unsigned int sU,
unsigned int mu,
vInteger t_mask,
bool switch_sign = false);

private:
// Specialised variants
void GenericDhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteHalfSpinor * buf,

@@ -746,7 +746,7 @@ template<typename GaugeField,typename GaugeMat>
}
}
template<typename GaugeField>
static void ColdConfiguration(GridParallelRNG &pRNG,GaugeField &out){
static void ColdConfiguration(GaugeField &out){
typedef typename GaugeField::vector_type vector_type;
typedef iSUnMatrix<vector_type> vMatrixType;
typedef Lattice<vMatrixType> LatticeMatrixType;
@@ -757,6 +757,10 @@ template<typename GaugeField,typename GaugeMat>
PokeIndex<LorentzIndex>(out,Umu,mu);
}
}
template<typename GaugeField>
static void ColdConfiguration(GridParallelRNG &pRNG,GaugeField &out){
ColdConfiguration(out);
}

template<typename LatticeMatrixType>
static void taProj( const LatticeMatrixType &in, LatticeMatrixType &out){

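The new RNG-free overload makes explicit that a cold start is deterministic: every link is set to the unit matrix, and the old pRNG signature survives only as a forwarding wrapper for existing call sites. An assumed call:

  LatticeGaugeField Umu(UGrid);
  SU3::ColdConfiguration(Umu);   // all links = 1, no RNG needed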
@@ -25,7 +25,7 @@
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid.h>
#include <Grid/Grid.h>

using namespace Grid;
using namespace std;

@@ -125,7 +125,11 @@ static inline void write(Writer<T> &WR,const std::string &s, const cname &obj){
}\
template <typename T>\
static inline void read(Reader<T> &RD,const std::string &s, cname &obj){ \
push(RD,s);\
if (!push(RD,s))\
{\
std::cout << Grid::GridLogWarning << "IO: Cannot open node '" << s << "'" << std::endl;\
return;\
};\
GRID_MACRO_EVAL(GRID_MACRO_MAP(GRID_MACRO_READ_MEMBER,__VA_ARGS__)) \
pop(RD);\
}\

@@ -100,13 +100,16 @@ XmlReader::XmlReader(const string &fileName,string toplev) : fileName_(fileName)

bool XmlReader::push(const string &s)
{
if (node_.child(s.c_str()))
{
node_ = node_.child(s.c_str());

if (node_.child(s.c_str()) == NULL )
return true;
}
else
{
return false;

node_ = node_.child(s.c_str());
return true;

}
}

void XmlReader::pop(void)
@@ -117,20 +120,30 @@ void XmlReader::pop(void)
bool XmlReader::nextElement(const std::string &s)
{
if (node_.next_sibling(s.c_str()))
{
node_ = node_.next_sibling(s.c_str());

return true;
}
{
node_ = node_.next_sibling(s.c_str());

return true;
}
else
{
return false;
}
{
return false;
}

}

template <>
void XmlReader::readDefault(const string &s, string &output)
{
output = node_.child(s.c_str()).first_child().value();
if (node_.child(s.c_str()))
{
output = node_.child(s.c_str()).first_child().value();
}
else
{
std::cout << GridLogWarning << "XML: cannot open node '" << s << "'";
std::cout << std::endl;

output = "";
}
}
@@ -39,6 +39,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#include <cassert>

#include <Grid/pugixml/pugixml.h>
#include <Grid/GridCore.h>

namespace Grid
{
@@ -119,7 +120,6 @@ namespace Grid
std::string buf;

readDefault(s, buf);
// std::cout << s << " " << buf << std::endl;
fromString(output, buf);
}

@@ -132,7 +132,13 @@ namespace Grid
std::string buf;
unsigned int i = 0;

push(s);
if (!push(s))
{
std::cout << GridLogWarning << "XML: cannot open node '" << s << "'";
std::cout << std::endl;

return;
}
while (node_.child("elem"))
{
output.resize(i + 1);

@@ -204,7 +204,7 @@ std::string GridCmdVectorIntToString(const std::vector<int> & vec){
// Reinit guard
/////////////////////////////////////////////////////////
static int Grid_is_initialised = 0;

static MemoryStats dbgMemStats;

void Grid_init(int *argc,char ***argv)
{
@@ -251,6 +251,11 @@ void Grid_init(int *argc,char ***argv)
assert(fp!=(FILE *)NULL);
}

if( GridCmdOptionExists(*argv,*argv+*argc,"--debug-mem") ){
MemoryProfiler::debug = true;
MemoryProfiler::stats = &dbgMemStats;
}

////////////////////////////////////
// Banner
////////////////////////////////////
@@ -324,6 +329,7 @@ void Grid_init(int *argc,char ***argv)
std::cout<<GridLogMessage<<" --decomposition : report on default omp,mpi and simd decomposition"<<std::endl;
std::cout<<GridLogMessage<<" --debug-signals : catch sigsegv and print a blame report"<<std::endl;
std::cout<<GridLogMessage<<" --debug-stdout : print stdout from EVERY node"<<std::endl;
std::cout<<GridLogMessage<<" --debug-mem : print Grid allocator activity"<<std::endl;
std::cout<<GridLogMessage<<" --notimestamp : suppress millisecond resolution stamps"<<std::endl;
std::cout<<GridLogMessage<<std::endl;
std::cout<<GridLogMessage<<"Performance:"<<std::endl;