
Compare commits


1 commit

b284d50863   Checking in fixed adaptive WilsonFlow   2021-06-07 14:20:27 -04:00
26 changed files with 138 additions and 3289 deletions

.gitignore (2 changed lines)

@ -10,8 +10,6 @@
*~
*#
*.sublime-*
.ctags
tags
# Precompiled Headers #
#######################


@ -240,14 +240,12 @@ public:
Field T0(grid); T0 = in;
Field T1(grid);
Field T2(grid);
Field Tout(grid);
Field y(grid);
Field *Tnm = &T0;
Field *Tn = &T1;
Field *Tnp = &T2;
std::cout << GridLogMessage << "Chebyshev() starts"<<std::endl;
// Tn=T1 = (xscale M + mscale)in
RealD xscale = 2.0/(hi-lo);
RealD mscale = -(hi+lo)/(hi-lo);
@ -256,7 +254,7 @@ public:
// sum = .5 c[0] T0 + c[1] T1
// out = ()*T0 + Coeffs[1]*T1;
axpby(Tout,0.5*Coeffs[0],Coeffs[1],T0,T1);
axpby(out,0.5*Coeffs[0],Coeffs[1],T0,T1);
for(int n=2;n<order;n++){
Linop.HermOp(*Tn,y);
@ -277,7 +275,7 @@ public:
axpby(y,xscale,mscale,y,(*Tn));
axpby(*Tnp,2.0,-1.0,y,(*Tnm));
if ( Coeffs[n] != 0.0) {
axpy(Tout,Coeffs[n],*Tnp,Tout);
axpy(out,Coeffs[n],*Tnp,out);
}
#endif
// Cycle pointers to avoid copies
@ -287,8 +285,6 @@ public:
Tnp =swizzle;
}
out = Tout;
std::cout << GridLogMessage << "Chebyshev() ends"<<std::endl;
}
};
@ -381,26 +377,24 @@ public:
Field T0(grid); T0 = in;
Field T1(grid);
Field T2(grid);
Field Tout(grid);
Field y(grid);
Field *Tnm = &T0;
Field *Tn = &T1;
Field *Tnp = &T2;
std::cout << GridLogMessage << "ChebyshevLanczos() starts"<<std::endl;
// Tn=T1 = (xscale M )*in
AminusMuSq(Linop,T0,T1);
// sum = .5 c[0] T0 + c[1] T1
Tout = (0.5*Coeffs[0])*T0 + Coeffs[1]*T1;
out = (0.5*Coeffs[0])*T0 + Coeffs[1]*T1;
for(int n=2;n<order;n++){
AminusMuSq(Linop,*Tn,y);
*Tnp=2.0*y-(*Tnm);
Tout=Tout+Coeffs[n]* (*Tnp);
out=out+Coeffs[n]* (*Tnp);
// Cycle pointers to avoid copies
Field *swizzle = Tnm;
@ -409,8 +403,6 @@ public:
Tnp =swizzle;
}
out=Tout;
std::cout << GridLogMessage << "ChebyshevLanczos() ends"<<std::endl;
}
};
NAMESPACE_END(Grid);
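The change above makes the Chebyshev applicator accumulate the polynomial sum directly into out rather than into a temporary Tout; the three-term recurrence itself is untouched. For orientation, here is a minimal scalar sketch of that recurrence, assuming only the structure visible in the hunks (the function name and the use of plain doubles are illustrative, not Grid API):

#include <iostream>
#include <vector>

// Scalar model of the recurrence: T0 = 1, T1 = xs, T_{n+1} = 2*xs*T_n - T_{n-1},
// with xs the argument rescaled from [lo,hi] to [-1,1], and the sum
// 0.5*c[0]*T0 + c[1]*T1 + sum_{n>=2} c[n]*T_n accumulated directly into out.
double ChebyshevSum(double x, double lo, double hi, const std::vector<double> &Coeffs)
{
  double xscale = 2.0 / (hi - lo);
  double mscale = -(hi + lo) / (hi - lo);
  double xs  = xscale * x + mscale;   // plays the role of (xscale*M + mscale) acting on a field
  double Tnm = 1.0;                   // T0
  double Tn  = xs;                    // T1
  double out = 0.5 * Coeffs[0] * Tnm + Coeffs[1] * Tn;
  for (int n = 2; n < (int)Coeffs.size(); n++) {
    double Tnp = 2.0 * xs * Tn - Tnm;             // three-term recurrence
    if (Coeffs[n] != 0.0) out += Coeffs[n] * Tnp;
    Tnm = Tn; Tn = Tnp;                           // cycle values, as the pointer swizzle does for fields
  }
  return out;
}

int main()
{
  std::vector<double> c = {1.0, 0.5, 0.25, 0.125};
  std::cout << ChebyshevSum(0.3, 0.0, 2.0, c) << std::endl;
}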

File diff suppressed because it is too large.


@ -44,7 +44,7 @@ void MemoryManager::AcceleratorFree (void *ptr,size_t bytes)
if ( __freeme ) {
acceleratorFreeDevice(__freeme);
total_device-=bytes;
// PrintBytes();
// PrintBytes();
}
}
void *MemoryManager::SharedAllocate(size_t bytes)
@ -53,8 +53,8 @@ void *MemoryManager::SharedAllocate(size_t bytes)
if ( ptr == (void *) NULL ) {
ptr = (void *) acceleratorAllocShared(bytes);
total_shared+=bytes;
std::cout <<GridLogMessage<<"AcceleratorAllocate: allocated Shared pointer "<<std::hex<<ptr<<std::dec<<std::endl;
// PrintBytes();
// std::cout <<"AcceleratorAllocate: allocated Shared pointer "<<std::hex<<ptr<<std::dec<<std::endl;
// PrintBytes();
}
return ptr;
}
@ -74,7 +74,6 @@ void *MemoryManager::CpuAllocate(size_t bytes)
if ( ptr == (void *) NULL ) {
ptr = (void *) acceleratorAllocShared(bytes);
total_host+=bytes;
// std::cout << GridLogMessage<< "MemoryManager:: CpuAllocate total_host= "<<total_host<<" "<< ptr << std::endl;
}
return ptr;
}
@ -84,7 +83,6 @@ void MemoryManager::CpuFree (void *_ptr,size_t bytes)
void *__freeme = Insert(_ptr,bytes,Cpu);
if ( __freeme ) {
acceleratorFreeShared(__freeme);
// std::cout << GridLogMessage<< "MemoryManager:: CpuFree total_host= "<<total_host<<" "<< __freeme << std::endl;
total_host-=bytes;
}
}


@ -35,9 +35,6 @@ Author: Christoph Lehner <christoph@lhnr.de>
#endif
#ifdef GRID_HIP
#include <hip/hip_runtime_api.h>
#endif
#ifdef GRID_SYCl
#endif
NAMESPACE_BEGIN(Grid);
@ -73,7 +70,6 @@ void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
WorldNodes = WorldSize/WorldShmSize;
assert( (WorldNodes * WorldShmSize) == WorldSize );
// FIXME: Check all WorldShmSize are the same ?
/////////////////////////////////////////////////////////////////////
@ -450,47 +446,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbfs mapping intended
////////////////////////////////////////////////////////////////////////////////////////////
#if defined(GRID_CUDA) ||defined(GRID_HIP) || defined(GRID_SYCL)
//if defined(GRID_SYCL)
#if 0
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
void * ShmCommBuf ;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// allocate the pointer array for shared windows for our group
//////////////////////////////////////////////////////////////////////////////////////////////////////////
MPI_Barrier(WorldShmComm);
WorldShmCommBufs.resize(WorldShmSize);
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Each MPI rank should allocate our own buffer
///////////////////////////////////////////////////////////////////////////////////////////////////////////
ShmCommBuf = acceleratorAllocDevice(bytes);
if (ShmCommBuf == (void *)NULL ) {
std::cerr << " SharedMemoryMPI.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
exit(EXIT_FAILURE);
}
std::cout << WorldRank << header " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes
<< "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
SharedMemoryZero(ShmCommBuf,bytes);
assert(WorldShmSize == 1);
for(int r=0;r<WorldShmSize;r++){
WorldShmCommBufs[r] = ShmCommBuf;
}
_ShmAllocBytes=bytes;
_ShmAlloc=1;
}
#endif
#if defined(GRID_CUDA) ||defined(GRID_HIP) ||defined(GRID_SYCL)
#if defined(GRID_CUDA) ||defined(GRID_HIP)
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
void * ShmCommBuf ;
@ -513,16 +469,8 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Each MPI rank should allocate our own buffer
///////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
auto zeDevice = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_device());
auto zeContext= cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_context());
ze_device_mem_alloc_desc_t zeDesc = {};
zeMemAllocDevice(zeContext,&zeDesc,bytes,2*1024*1024,zeDevice,&ShmCommBuf);
std::cout << WorldRank << header " SharedMemoryMPI.cc zeMemAllocDevice "<< bytes
<< "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
#else
ShmCommBuf = acceleratorAllocDevice(bytes);
#endif
if (ShmCommBuf == (void *)NULL ) {
std::cerr << " SharedMemoryMPI.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
exit(EXIT_FAILURE);
@ -532,8 +480,8 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
std::cout << WorldRank << header " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes
<< "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
}
// SharedMemoryZero(ShmCommBuf,bytes);
std::cout<< "Setting up IPC"<<std::endl;
SharedMemoryZero(ShmCommBuf,bytes);
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Loop over ranks/gpu's on our node
///////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -543,23 +491,6 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
//////////////////////////////////////////////////
// If it is me, pass around the IPC access key
//////////////////////////////////////////////////
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
ze_ipc_mem_handle_t handle;
if ( r==WorldShmRank ) {
auto err = zeMemGetIpcHandle(zeContext,ShmCommBuf,&handle);
if ( err != ZE_RESULT_SUCCESS ) {
std::cerr << "SharedMemoryMPI.cc zeMemGetIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
exit(EXIT_FAILURE);
} else {
std::cerr << "SharedMemoryMPI.cc zeMemGetIpcHandle succeeded for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
}
std::cerr<<"Allocated IpcHandle rank "<<r<<" (hex) ";
for(int c=0;c<ZE_MAX_IPC_HANDLE_SIZE;c++){
std::cerr<<std::hex<<(uint32_t)((uint8_t)handle.data[c])<<std::dec;
}
std::cerr<<std::endl;
}
#endif
#ifdef GRID_CUDA
cudaIpcMemHandle_t handle;
if ( r==WorldShmRank ) {
@ -596,25 +527,6 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
// If I am not the source, overwrite thisBuf with remote buffer
///////////////////////////////////////////////////////////////
void * thisBuf = ShmCommBuf;
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
if ( r!=WorldShmRank ) {
thisBuf = nullptr;
std::cerr<<"Using IpcHandle rank "<<r<<" ";
for(int c=0;c<ZE_MAX_IPC_HANDLE_SIZE;c++){
std::cerr<<std::hex<<(uint32_t)((uint8_t)handle.data[c])<<std::dec;
}
std::cerr<<std::endl;
auto err = zeMemOpenIpcHandle(zeContext,zeDevice,handle,0,&thisBuf);
if ( err != ZE_RESULT_SUCCESS ) {
std::cerr << "SharedMemoryMPI.cc "<<zeContext<<" "<<zeDevice<<std::endl;
std::cerr << "SharedMemoryMPI.cc zeMemOpenIpcHandle failed for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
exit(EXIT_FAILURE);
} else {
std::cerr << "SharedMemoryMPI.cc zeMemOpenIpcHandle succeeded for rank "<<r<<" "<<std::hex<<err<<std::dec<<std::endl;
}
assert(thisBuf!=nullptr);
}
#endif
#ifdef GRID_CUDA
if ( r!=WorldShmRank ) {
auto err = cudaIpcOpenMemHandle(&thisBuf,handle,cudaIpcMemLazyEnablePeerAccess);
@ -645,8 +557,6 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
_ShmAllocBytes=bytes;
_ShmAlloc=1;
}
#endif
#else
#ifdef GRID_MPI3_SHMMMAP
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
@ -817,16 +727,16 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
/////////////////////////////////////////////////////////////////////////
void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
{
#if defined(GRID_CUDA) || defined(GRID_HIP) || defined(GRID_SYCL)
acceleratorMemSet(dest,0,bytes);
#ifdef GRID_CUDA
cudaMemset(dest,0,bytes);
#else
bzero(dest,bytes);
#endif
}
void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes)
{
#if defined(GRID_CUDA) || defined(GRID_HIP) || defined(GRID_SYCL)
acceleratorCopyToDevice(src,dest,bytes);
#ifdef GRID_CUDA
cudaMemcpy(dest,src,bytes,cudaMemcpyDefault);
#else
bcopy(src,dest,bytes);
#endif
@ -890,7 +800,7 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
}
#endif
//SharedMemoryTest();
SharedMemoryTest();
}
//////////////////////////////////////////////////////////////////
// On node barrier
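Most of what the hunks above remove is the experimental SYCL Level Zero IPC path; the CUDA/HIP path that survives follows the usual node-local IPC exchange: each rank allocates its own device buffer, publishes an IPC handle, and every other rank on the node opens that handle to obtain a peer pointer. A hedged sketch of that pattern (not Grid's SharedMemoryAllocate; the function name and the use of MPI_Bcast as the transport for the handle are assumptions here):

#include <mpi.h>
#include <cuda_runtime.h>
#include <vector>
#include <cstdlib>

// Exchange device buffers among the ranks of one node (shmComm is a node-local communicator).
// bufs[r] ends up pointing at rank r's device allocation, either directly (r == my rank)
// or through an opened IPC handle.
std::vector<void *> ExchangeNodeBuffers(MPI_Comm shmComm, size_t bytes)
{
  int rank, size;
  MPI_Comm_rank(shmComm, &rank);
  MPI_Comm_size(shmComm, &size);

  void *mine = nullptr;
  if (cudaMalloc(&mine, bytes) != cudaSuccess) std::abort();

  std::vector<void *> bufs(size, nullptr);
  for (int r = 0; r < size; r++) {
    cudaIpcMemHandle_t handle;
    if (r == rank) cudaIpcGetMemHandle(&handle, mine);          // publish my buffer's access key
    MPI_Bcast(&handle, sizeof(handle), MPI_BYTE, r, shmComm);   // pass the key around the node
    if (r == rank) bufs[r] = mine;
    else           cudaIpcOpenMemHandle(&bufs[r], handle, cudaIpcMemLazyEnablePeerAccess);
  }
  return bufs;
}

The deleted Level Zero branch performed the same handshake with zeMemGetIpcHandle / zeMemOpenIpcHandle in place of the CUDA IPC calls.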


@ -198,7 +198,7 @@ public:
std::cerr << " nersc_csum " <<std::hex<< nersc_csum << " " << header.checksum<< std::dec<< std::endl;
exit(0);
}
assert(fabs(clone.plaquette -header.plaquette ) < 1.0e-1 );
assert(fabs(clone.plaquette -header.plaquette ) < 1.0e-5 );
assert(fabs(clone.link_trace-header.link_trace) < 1.0e-6 );
assert(nersc_csum == header.checksum );


@ -6,7 +6,8 @@ Source file: ./lib/qcd/modules/plaquette.h
Copyright (C) 2017
Author: Guido Cossu <guido.cossu@ed.ac.uk>
Author: Guido Cossu <guido.cossu@ed.ac.uk>
Author: Chulwoo Jung <chulwoo@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -35,7 +36,7 @@ template <class Gimpl>
class WilsonFlow: public Smear<Gimpl>{
unsigned int Nstep;
unsigned int measure_interval;
mutable RealD epsilon, taus;
mutable RealD epsilon, taus,tolerance;
mutable WilsonGaugeAction<Gimpl> SG;
@ -47,13 +48,15 @@ class WilsonFlow: public Smear<Gimpl>{
public:
INHERIT_GIMPL_TYPES(Gimpl)
explicit WilsonFlow(unsigned int Nstep, RealD epsilon, unsigned int interval = 1):
explicit WilsonFlow(unsigned int Nstep, RealD epsilon, unsigned int interval = 1, RealD tol = 1e-3):
Nstep(Nstep),
epsilon(epsilon),
tolerance(tol),
measure_interval(interval),
SG(WilsonGaugeAction<Gimpl>(3.0)) {
// WilsonGaugeAction with beta 3.0
assert(epsilon > 0.0);
assert(tolerance > 0.0);
LogMessage();
}
@ -64,6 +67,8 @@ public:
<< "[WilsonFlow] epsilon : " << epsilon << std::endl;
std::cout << GridLogMessage
<< "[WilsonFlow] full trajectory : " << Nstep * epsilon << std::endl;
std::cout << GridLogMessage
<< "[WilsonFlow] tolerance : " << tolerance << std::endl;
}
virtual void smear(GaugeField&, const GaugeField&) const;
@ -106,11 +111,14 @@ void WilsonFlow<Gimpl>::evolve_step_adaptive(typename Gimpl::GaugeField &U, Real
if (maxTau - taus < epsilon){
epsilon = maxTau-taus;
}
//std::cout << GridLogMessage << "Integration epsilon : " << epsilon << std::endl;
std::cout << GridLogMessage << "Integration epsilon : " << epsilon << std::endl;
GaugeField Z(U.Grid());
GaugeField Zprime(U.Grid());
GaugeField tmp(U.Grid()), Uprime(U.Grid());
GaugeField tmp(U.Grid()), Uprime(U.Grid()),Usave(U.Grid());
Uprime = U;
Usave = U;
SG.deriv(U, Z);
Zprime = -Z;
Z *= 0.25; // Z0 = 1/4 * F(U)
@ -128,18 +136,33 @@ void WilsonFlow<Gimpl>::evolve_step_adaptive(typename Gimpl::GaugeField &U, Real
Z *= 3.0/4.0; // Z = 17/36*Z0 -8/9*Z1 +3/4*Z2
Gimpl::update_field(Z, U, -2.0*epsilon); // V(t+e) = exp(ep*Z)*W2
// Ramos
// Ramos arXiv:1301.4388
Gimpl::update_field(Zprime, Uprime, -2.0*epsilon); // V'(t+e) = exp(ep*Z')*W0
// Compute distance as norm^2 of the difference
GaugeField diffU = U - Uprime;
RealD diff = norm2(diffU);
// adjust integration step
// Wrong
// RealD diff = norm2(diffU);
// std::cout << GridLogMessage << "norm2: " << diff << std::endl;
// RealD tol=1e-3;
RealD diff = real(rankInnerMax(diffU,diffU));
diff = sqrt(diff)/18.; // distance defined in Ramos
GridBase *grid = diffU.Grid();
std::cout << GridLogMessage << "max: " << diff << std::endl;
grid->GlobalMax(diff);
std::cout << GridLogMessage << "max: " << diff << std::endl;
if(diff < tolerance) {
taus += epsilon;
//std::cout << GridLogMessage << "Adjusting integration step with distance: " << diff << std::endl;
// std::cout << GridLogMessage << "Adjusting integration step with distance: " << diff << std::endl;
} else {
U = Usave;
}
epsilon = epsilon*0.95*std::pow(1e-4/diff,1./3.);
//std::cout << GridLogMessage << "New epsilon : " << epsilon << std::endl;
epsilon = epsilon*0.95*std::pow(tolerance/diff,1./3.);
std::cout << GridLogMessage << "Distance : "<<diff<<"New epsilon : " << epsilon << std::endl;
}
@ -184,8 +207,11 @@ void WilsonFlow<Gimpl>::smear(GaugeField& out, const GaugeField& in) const {
template <class Gimpl>
void WilsonFlow<Gimpl>::smear_adaptive(GaugeField& out, const GaugeField& in, RealD maxTau){
out = in;
taus = epsilon;
// taus = epsilon;
taus = 0.;
unsigned int step = 0;
double measTau = epsilon*measure_interval;
std::cout << GridLogMessage << "measTau :"<< measTau << std::endl;
do{
step++;
//std::cout << GridLogMessage << "Evolution time :"<< taus << std::endl;
@ -193,10 +219,12 @@ void WilsonFlow<Gimpl>::smear_adaptive(GaugeField& out, const GaugeField& in, Re
std::cout << GridLogMessage << "[WilsonFlow] Energy density (plaq) : "
<< step << " " << taus << " "
<< energyDensityPlaquette(out) << std::endl;
if( step % measure_interval == 0){
// if( step % measure_interval == 0){
if( taus > measTau ) {
std::cout << GridLogMessage << "[WilsonFlow] Top. charge : "
<< step << " "
<< WilsonLoops<PeriodicGimplR>::TopologicalCharge(out) << std::endl;
measTau += epsilon*measure_interval;
}
} while (taus < maxTau);
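The core of the commit is the block above: evolve_step_adaptive now saves the field before the step, accepts the step only when the Ramos distance between the third-order and lower-order updates is below the new tolerance parameter, restores the saved field otherwise, and rescales epsilon from tolerance rather than a hard-coded 1e-4; smear_adaptive starts the flow time at zero and triggers measurements on flow-time intervals instead of step counts. A toy sketch of that accept/reject control (scalar state, invented step functions, not the Grid implementation):

#include <cmath>
#include <iostream>

// Toy adaptive integrator mirroring the control flow in the diff above: a saved copy,
// an accept test against tolerance, and the epsilon rescaling by 0.95*(tol/diff)^(1/3).
struct AdaptiveFlow {
  double epsilon;    // current step size
  double taus;       // accumulated flow time
  double tolerance;  // accept/reject threshold

  template <class State, class StepHi, class StepLo>
  void step(State &U, StepHi stepHi, StepLo stepLo, double maxTau) {
    if (maxTau - taus < epsilon) epsilon = maxTau - taus;   // do not overshoot maxTau
    State Usave  = U;                                       // keep the field in case we reject
    State Uprime = U;
    stepHi(U, epsilon);                                     // higher-order update (RK3 in Grid)
    stepLo(Uprime, epsilon);                                // lower-order update from the same state
    double diff = std::abs(U - Uprime) / 18.;               // "distance defined in Ramos", same normalisation as the diff
    if (diff < tolerance) taus += epsilon;                  // accept: advance flow time
    else                  U = Usave;                        // reject: restore the saved field
    epsilon *= 0.95 * std::pow(tolerance / diff, 1. / 3.);  // rescale the step either way
  }
};

int main() {
  AdaptiveFlow flow{0.1, 0.0, 1e-3};
  double U = 1.0;
  // toy "integrators" for dU/dt = -U: exact step vs. Euler step, just to exercise the control
  auto hi = [](double &u, double eps) { u *= std::exp(-eps); };
  auto lo = [](double &u, double eps) { u *= (1.0 - eps);    };
  while (flow.taus < 0.5) flow.step(U, hi, lo, 0.5);
  std::cout << "flow time " << flow.taus << "  U = " << U << std::endl;
}

In the real class the two updates are the RK3 Wilson-flow step and the exp(eps*Z')*W0 update, and the distance is the global maximum of the site-wise inner product, hence the rankInnerMax and GlobalMax calls in the hunk.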


@ -1,5 +1,5 @@
/*************************************************************************************
n
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/tensors/Tensor_extract_merge.h
@ -153,7 +153,7 @@ void insertLane(int lane, vobj & __restrict__ vec,const typename vobj::scalar_ob
// Extract to a bunch of scalar object pointers of different scalar type, with offset. Useful for precision change
////////////////////////////////////////////////////////////////////////
template<class vobj, class sobj> accelerator
void extract(const vobj &vec,const ExtractPointerArray<sobj> &extracted, int offset)
void extract(const vobj &vec,ExtractPointerArray<sobj> &extracted, int offset)
{
typedef typename GridTypeMapper<sobj>::scalar_type sobj_scalar_type;
typedef typename GridTypeMapper<vobj>::scalar_type scalar_type;
@ -181,7 +181,7 @@ void extract(const vobj &vec,const ExtractPointerArray<sobj> &extracted, int off
// Merge bunch of scalar object pointers of different scalar type, with offset. Useful for precision change
////////////////////////////////////////////////////////////////////////
template<class vobj, class sobj> accelerator
void merge(vobj &vec,const ExtractPointerArray<sobj> &extracted, int offset)
void merge(vobj &vec,ExtractPointerArray<sobj> &extracted, int offset)
{
typedef typename GridTypeMapper<sobj>::scalar_type sobj_scalar_type;
typedef typename GridTypeMapper<vobj>::scalar_type scalar_type;


@ -171,6 +171,7 @@ void acceleratorInit(void)
#ifdef GRID_SYCL
cl::sycl::queue *theGridAccelerator;
void acceleratorInit(void)
{
int nDevices = 1;
@ -178,10 +179,6 @@ void acceleratorInit(void)
cl::sycl::device selectedDevice { selector };
theGridAccelerator = new sycl::queue (selectedDevice);
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
zeInit(0);
#endif
char * localRankStr = NULL;
int rank = 0, world_rank=0;
#define ENV_LOCAL_RANK_OMPI "OMPI_COMM_WORLD_LOCAL_RANK"


@ -39,10 +39,6 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#ifdef HAVE_MM_MALLOC_H
#include <mm_malloc.h>
#endif
#ifdef __APPLE__
// no memalign
inline void *memalign(size_t align, size_t bytes) { return malloc(bytes); }
#endif
NAMESPACE_BEGIN(Grid);
@ -198,9 +194,6 @@ inline void *acceleratorAllocShared(size_t bytes)
ptr = (void *) NULL;
printf(" cudaMallocManaged failed for %d %s \n",bytes,cudaGetErrorString(err));
}
size_t free,total;
cudaMemGetInfo(&free,&total);
std::cout<<"cudaMemGetInfo "<<free<<" / "<<total<<std::endl;
return ptr;
};
inline void *acceleratorAllocDevice(size_t bytes)
@ -211,9 +204,6 @@ inline void *acceleratorAllocDevice(size_t bytes)
ptr = (void *) NULL;
printf(" cudaMalloc failed for %d %s \n",bytes,cudaGetErrorString(err));
}
size_t free,total;
cudaMemGetInfo(&free,&total);
std::cout<<"cudaMemGetInfo "<<free<<" / "<<total<<std::endl;
return ptr;
};
inline void acceleratorFreeShared(void *ptr){ cudaFree(ptr);};
@ -243,13 +233,6 @@ inline int acceleratorIsCommunicable(void *ptr)
NAMESPACE_END(Grid);
#include <CL/sycl.hpp>
#include <CL/sycl/usm.hpp>
#define GRID_SYCL_LEVEL_ZERO_IPC
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
#include <level_zero/ze_api.h>
#include <CL/sycl/backend/level_zero.hpp>
#endif
NAMESPACE_BEGIN(Grid);
extern cl::sycl::queue *theGridAccelerator;
@ -274,14 +257,11 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) {
unsigned long nt=acceleratorThreads(); \
unsigned long unum1 = num1; \
unsigned long unum2 = num2; \
if(nt < 8)nt=8; \
cl::sycl::range<3> local {nt,1,nsimd}; \
cl::sycl::range<3> global{unum1,unum2,nsimd}; \
cgh.parallel_for<class dslash>( \
cl::sycl::nd_range<3>(global,local), \
[=] (cl::sycl::nd_item<3> item) /*mutable*/ \
[[intel::reqd_sub_group_size(8)]] \
{ \
[=] (cl::sycl::nd_item<3> item) /*mutable*/ { \
auto iter1 = item.get_global_id(0); \
auto iter2 = item.get_global_id(1); \
auto lane = item.get_global_id(2); \
@ -429,8 +409,6 @@ inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(bas
#undef GRID_SIMT
#define accelerator
#define accelerator_inline strong_inline
#define accelerator_for(iterator,num,nsimd, ... ) thread_for(iterator, num, { __VA_ARGS__ });


@ -56,8 +56,6 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
static int
feenableexcept (unsigned int excepts)
{
#if 0
// Fails on Apple M1
static fenv_t fenv;
unsigned int new_excepts = excepts & FE_ALL_EXCEPT;
unsigned int old_excepts; // previous masks
@ -72,8 +70,6 @@ feenableexcept (unsigned int excepts)
iold_excepts = (int) old_excepts;
return ( fesetenv (&fenv) ? -1 : iold_excepts );
#endif
return 0;
}
#endif
@ -167,13 +163,6 @@ void GridCmdOptionInt(std::string &str,int & val)
return;
}
// ypj [add]
void GridCmdOptionFloat(std::string &str,double & val)
{
std::stringstream ss(str);
ss>>val;
return;
}
void GridParseLayout(char **argv,int argc,
Coordinate &latt_c,


@ -57,8 +57,7 @@ void GridCmdOptionCSL(std::string str,std::vector<std::string> & vec);
template<class VectorInt>
void GridCmdOptionIntVector(const std::string &str,VectorInt & vec);
void GridCmdOptionInt(std::string &str,int & val);
// ypj [add]
void GridCmdOptionFloat(std::string &str,double & val);
void GridParseLayout(char **argv,int argc,
std::vector<int> &latt,


@ -1,5 +1,5 @@
# additional include paths necessary to compile the C++ library
SUBDIRS = Grid HMC benchmarks tests examples
SUBDIRS = Grid HMC benchmarks tests
include $(top_srcdir)/doxygen.inc


@ -133,30 +133,34 @@ public:
std::vector<HalfSpinColourVectorD *> xbuf(8);
std::vector<HalfSpinColourVectorD *> rbuf(8);
//Grid.ShmBufferFreeAll();
uint64_t bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD);
Grid.ShmBufferFreeAll();
for(int d=0;d<8;d++){
xbuf[d] = (HalfSpinColourVectorD *)acceleratorAllocDevice(bytes);
rbuf[d] = (HalfSpinColourVectorD *)acceleratorAllocDevice(bytes);
xbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
rbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
// bzero((void *)xbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
// bzero((void *)rbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
}
int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD);
int ncomm;
double dbytes;
std::vector<double> times(Nloop);
for(int i=0;i<Nloop;i++){
for(int dir=0;dir<8;dir++) {
int mu =dir % 4;
if (mpi_layout[mu]>1 ) {
double start=usecond();
std::vector<double> times(Nloop);
for(int i=0;i<Nloop;i++){
dbytes=0;
ncomm=0;
dbytes=0;
double start=usecond();
thread_for(dir,8,{
double tbytes;
int mu =dir % 4;
if (mpi_layout[mu]>1 ) {
int xmit_to_rank;
int recv_from_rank;
if ( dir == mu ) {
int comm_proc=1;
Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
@ -164,38 +168,40 @@ public:
int comm_proc = mpi_layout[mu]-1;
Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
}
Grid.SendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
(void *)&rbuf[dir][0], recv_from_rank,
bytes);
dbytes+=bytes;
double stop=usecond();
t_time[i] = stop-start; // microseconds
tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
(void *)&rbuf[dir][0], recv_from_rank,
bytes,dir);
thread_critical {
ncomm++;
dbytes+=tbytes;
}
}
timestat.statistics(t_time);
dbytes=dbytes*ppn;
double xbytes = dbytes*0.5;
double bidibytes = dbytes;
std::cout<<GridLogMessage << lat<<"\t"<<Ls<<"\t "
<< bytes << " \t "
<<xbytes/timestat.mean<<" \t "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " \t "
<<xbytes/timestat.max <<" "<< xbytes/timestat.min
<< "\t\t"<< bidibytes/timestat.mean<< " " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " "
<< bidibytes/timestat.max << " " << bidibytes/timestat.min << std::endl;
}
});
Grid.Barrier();
double stop=usecond();
t_time[i] = stop-start; // microseconds
}
for(int d=0;d<8;d++){
acceleratorFreeDevice(xbuf[d]);
acceleratorFreeDevice(rbuf[d]);
}
}
}
timestat.statistics(t_time);
dbytes=dbytes*ppn;
double xbytes = dbytes*0.5;
// double rbytes = dbytes*0.5;
double bidibytes = dbytes;
std::cout<<GridLogMessage << lat<<"\t"<<Ls<<"\t "
<< bytes << " \t "
<<xbytes/timestat.mean<<" \t "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " \t "
<<xbytes/timestat.max <<" "<< xbytes/timestat.min
<< "\t\t"<< bidibytes/timestat.mean<< " " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " "
<< bidibytes/timestat.max << " " << bidibytes/timestat.min << std::endl;
}
}
return;
}
static void Memory(void)
@ -275,6 +281,7 @@ public:
uint64_t lmax=32;
#define NLOOP (1000*lmax*lmax*lmax*lmax/lat/lat/lat/lat)
GridSerialRNG sRNG; sRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
for(int lat=8;lat<=lmax;lat+=8){
@ -438,7 +445,7 @@ public:
// 1344= 3*(2*8+6)*2*8 + 8*3*2*2 + 3*4*2*8
// 1344 = Nc* (6+(Nc-1)*8)*2*Nd + Nd*Nc*2*2 + Nd*Nc*Ns*2
// double flops=(1344.0*volume)/2;
#if 0
#if 1
double fps = Nc* (6+(Nc-1)*8)*Ns*Nd + Nd*Nc*Ns + Nd*Nc*Ns*2;
#else
double fps = Nc* (6+(Nc-1)*8)*Ns*Nd + 2*Nd*Nc*Ns + 2*Nd*Nc*Ns*2;
@ -709,12 +716,12 @@ int main (int argc, char ** argv)
if ( do_su4 ) {
std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
std::cout<<GridLogMessage << " SU(4) benchmark " <<std::endl;
std::cout<<GridLogMessage << " Memory benchmark " <<std::endl;
std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
Benchmark::SU4();
}
if ( do_comms ) {
if ( do_comms && (NN>1) ) {
std::cout<<GridLogMessage << "=================================================================================="<<std::endl;
std::cout<<GridLogMessage << " Communications benchmark " <<std::endl;
std::cout<<GridLogMessage << "=================================================================================="<<std::endl;


@ -815,7 +815,6 @@ AC_CONFIG_FILES(tests/smearing/Makefile)
AC_CONFIG_FILES(tests/qdpxx/Makefile)
AC_CONFIG_FILES(tests/testu01/Makefile)
AC_CONFIG_FILES(benchmarks/Makefile)
AC_CONFIG_FILES(examples/Makefile)
AC_OUTPUT
echo ""


@ -1,396 +0,0 @@
#include <Grid/Grid.h>
using namespace Grid;
/*
/////////////////////////////////////////////////////////////////////////////////////////////
// Grid/algorithms/SparseMatrix.h: Interface defining what I expect of a general sparse matrix, such as a Fermion action
/////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class SparseMatrixBase {
public:
virtual GridBase *Grid(void) =0;
virtual void M (const Field &in, Field &out)=0;
virtual void Mdag (const Field &in, Field &out)=0;
virtual void MdagM(const Field &in, Field &out) {
Field tmp (in.Grid());
M(in,tmp);
Mdag(tmp,out);
}
virtual void Mdiag (const Field &in, Field &out)=0;
virtual void Mdir (const Field &in, Field &out,int dir, int disp)=0;
virtual void MdirAll (const Field &in, std::vector<Field> &out)=0;
};
*/
const std::vector<int> directions ({Xdir,Ydir,Zdir,Xdir,Ydir,Zdir});
const std::vector<int> displacements({1,1,1,-1,-1,-1});
template<class Field> class FreeLaplacianCshift : public SparseMatrixBase<Field>
{
public:
GridBase *grid;
FreeLaplacianCshift(GridBase *_grid)
{
grid=_grid;
};
virtual GridBase *Grid(void) { return grid; };
virtual void M (const Field &in, Field &out)
{
out = Zero();
for(int mu=0;mu<Nd-1;mu++) {
out = out + Cshift(in,mu,1) + Cshift(in,mu,-1) - 2.0 * in;
}
};
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
};
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
GridBase *grid;
GaugeField U;
CovariantLaplacianCshift(GaugeField &_U) :
grid(_U.Grid()),
U(_U) { };
virtual GridBase *Grid(void) { return grid; };
virtual void M (const Field &in, Field &out)
{
out=Zero();
for(int mu=0;mu<Nd-1;mu++) {
GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficient
out = out + Gimpl::CovShiftForward(Umu,mu,in);
out = out + Gimpl::CovShiftBackward(Umu,mu,in);
out = out - 2.0*in;
}
};
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
};
#define LEG_LOAD(Dir) \
SE = st.GetEntry(ptype, Dir, ss); \
if (SE->_is_local ) { \
int perm= SE->_permute; \
chi = coalescedReadPermute(in[SE->_offset],ptype,perm,lane); \
} else { \
chi = coalescedRead(buf[SE->_offset],lane); \
} \
acceleratorSynchronise();
template<class Field> class FreeLaplacianStencil : public SparseMatrixBase<Field>
{
public:
typedef typename Field::vector_object siteObject;
typedef CartesianStencil<siteObject, siteObject, int> StencilImpl;
GridBase *grid;
StencilImpl Stencil;
SimpleCompressor<siteObject> Compressor;
FreeLaplacianStencil(GridBase *_grid)
: Stencil (_grid,6,Even,directions,displacements,0), grid(_grid)
{ };
virtual GridBase *Grid(void) { return grid; };
virtual void M (const Field &_in, Field &_out)
{
///////////////////////////////////////////////
// Halo exchange for this geometry of stencil
///////////////////////////////////////////////
Stencil.HaloExchange(_in, Compressor);
///////////////////////////////////
// Arithmetic expressions
///////////////////////////////////
// Views; device friendly/accessible pointers
auto st = Stencil.View(AcceleratorRead);
auto buf = st.CommBuf();
autoView( in , _in , AcceleratorRead);
autoView( out , _out , AcceleratorWrite);
typedef typename Field::vector_object vobj;
typedef decltype(coalescedRead(in[0])) calcObj;
const int Nsimd = vobj::Nsimd();
const uint64_t NN = grid->oSites();
accelerator_for( ss, NN, Nsimd, {
StencilEntry *SE;
const int lane=acceleratorSIMTlane(Nsimd);
calcObj chi;
calcObj res;
int ptype;
res = coalescedRead(in[ss])*(-6.0);
LEG_LOAD(0); res = res + chi;
LEG_LOAD(1); res = res + chi;
LEG_LOAD(2); res = res + chi;
LEG_LOAD(3); res = res + chi;
LEG_LOAD(4); res = res + chi;
LEG_LOAD(5); res = res + chi;
coalescedWrite(out[ss], res,lane);
});
};
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
};
template<class Gimpl,class Field> class CovariantLaplacianStencil : public SparseMatrixBase<Field>
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
typedef typename Field::vector_object siteObject;
template <typename vtype> using iImplDoubledGaugeField = iVector<iScalar<iMatrix<vtype, Nc> >, Nds>;
typedef iImplDoubledGaugeField<Simd> SiteDoubledGaugeField;
typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;
typedef CartesianStencil<siteObject, siteObject, int> StencilImpl;
GridBase *grid;
StencilImpl Stencil;
SimpleCompressor<siteObject> Compressor;
DoubledGaugeField Uds;
CovariantLaplacianStencil(GaugeField &Umu)
:
grid(Umu.Grid()),
Stencil (grid,6,Even,directions,displacements,0),
Uds(grid)
{
for (int mu = 0; mu < Nd; mu++) {
auto U = PeekIndex<LorentzIndex>(Umu, mu);
PokeIndex<LorentzIndex>(Uds, U, mu );
U = adj(Cshift(U, mu, -1));
PokeIndex<LorentzIndex>(Uds, U, mu + 4);
}
};
virtual GridBase *Grid(void) { return grid; };
virtual void M (const Field &_in, Field &_out)
{
///////////////////////////////////////////////
// Halo exchange for this geometry of stencil
///////////////////////////////////////////////
Stencil.HaloExchange(_in, Compressor);
///////////////////////////////////
// Arithmetic expressions
///////////////////////////////////
auto st = Stencil.View(AcceleratorRead);
auto buf = st.CommBuf();
autoView( in , _in , AcceleratorRead);
autoView( out , _out , AcceleratorWrite);
autoView( U , Uds , AcceleratorRead);
typedef typename Field::vector_object vobj;
typedef decltype(coalescedRead(in[0])) calcObj;
typedef decltype(coalescedRead(U[0](0))) calcLink;
const int Nsimd = vobj::Nsimd();
const uint64_t NN = grid->oSites();
accelerator_for( ss, NN, Nsimd, {
StencilEntry *SE;
const int lane=acceleratorSIMTlane(Nsimd);
calcObj chi;
calcObj res;
calcObj Uchi;
calcLink UU;
int ptype;
res = coalescedRead(in[ss])*(-6.0);
#define LEG_LOAD_MULT(leg,polarisation) \
UU = coalescedRead(U[ss](polarisation)); \
LEG_LOAD(leg); \
mult(&Uchi(), &UU, &chi()); \
res = res + Uchi;
LEG_LOAD_MULT(0,Xp);
LEG_LOAD_MULT(1,Yp);
LEG_LOAD_MULT(2,Zp);
LEG_LOAD_MULT(3,Xm);
LEG_LOAD_MULT(4,Ym);
LEG_LOAD_MULT(5,Zm);
coalescedWrite(out[ss], res,lane);
});
};
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
};
#undef LEG_LOAD_MULT
#undef LEG_LOAD
int main(int argc, char ** argv)
{
Grid_init(&argc, &argv);
typedef LatticeColourVector Field;
auto latt_size = GridDefaultLatt();
auto simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
auto mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
GridParallelRNG RNG(&Grid); RNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
FreeLaplacianCshift<Field> FLcs(&Grid);
FreeLaplacianStencil<Field> FLst(&Grid);
LatticeGaugeField U(&Grid);
SU<Nc>::ColdConfiguration(RNG,U);
std::cout << " Gauge field has norm " <<norm2(U)<<std::endl;
CovariantLaplacianCshift <PeriodicGimplR,Field> CLcs(U);
CovariantLaplacianStencil<PeriodicGimplR,Field> CLst(U);
Field in(&Grid); gaussian(RNG,in);
Field out_FLcs(&Grid);
Field out_FLst(&Grid);
Field out_CLcs(&Grid);
Field out_CLst(&Grid);
Field diff(&Grid);
////////////////////////////////////////////////////////
// First test: in free field these should all agree
////////////////////////////////////////////////////////
FLcs.M(in,out_FLcs);
FLst.M(in,out_FLst);
CLcs.M(in,out_CLcs);
CLst.M(in,out_CLst);
std:: cout << "******************************************************************" <<std::endl;
std:: cout << " Test A: consistency of four different Laplacian implementations " <<std::endl;
std:: cout << "******************************************************************" <<std::endl;
std:: cout << " Input test vector " <<norm2(in)<<std::endl;
std:: cout << "--------------------------------------------------------" <<std::endl;
std:: cout << " Free cshift output vector " <<norm2(out_FLcs)<<std::endl;
std:: cout << " Free stencil output vector " <<norm2(out_FLst)<<std::endl;
std:: cout << " Cov cshift output vector " <<norm2(out_CLcs)<<std::endl;
std:: cout << " Cov stencil output vector " <<norm2(out_CLst)<<std::endl;
std:: cout << "--------------------------------------------------------" <<std::endl;
diff = out_FLcs - out_FLst;
std:: cout << " Difference between free Cshift Laplacian and free Stencil Laplacian = " <<norm2(diff)<<std::endl;
diff = out_FLcs - out_CLcs;
std:: cout << " Difference between free Cshift Laplacian and covariant Cshift Laplacian = " <<norm2(diff)<<std::endl;
diff = out_FLcs - out_CLst;
std:: cout << " Difference between free Cshift Laplacian and covariant Stencil Laplacian = " <<norm2(diff)<<std::endl;
std:: cout << "--------------------------------------------------------" <<std::endl;
std:: cout << "******************************************************************" <<std::endl;
std:: cout << " Test B: gauge covariance " <<std::endl;
std:: cout << "******************************************************************" <<std::endl;
LatticeGaugeField U_GT(&Grid); // Gauge transformed field
LatticeColourMatrix g(&Grid); // local Gauge xform matrix
U_GT = U;
// Make a random xform to the gauge field
SU<Nc>::RandomGaugeTransform(RNG,U_GT,g); // Unit gauge
Field in_GT(&Grid);
Field out_GT(&Grid);
Field out_CLcs_GT(&Grid);
Field out_CLst_GT(&Grid);
CovariantLaplacianCshift <PeriodicGimplR,Field> CLcs_GT(U_GT);
CovariantLaplacianStencil<PeriodicGimplR,Field> CLst_GT(U_GT);
in_GT = g*in;
out_GT = g*out_FLcs;
// Check M^GT_xy in_GT = g(x) M_xy g^dag(y) g(y) in = g(x) out(x)
CLcs_GT.M(in_GT,out_CLcs_GT);
CLst_GT.M(in_GT,out_CLst_GT);
diff = out_CLcs_GT - out_GT;
std:: cout << " Difference between Gauge xformed result and covariant Cshift Laplacian in xformed gauge = " <<norm2(diff)<<std::endl;
diff = out_CLst_GT - out_GT;
std:: cout << " Difference between Gauge xformed result and covariant Stencil Laplacian in xformed gauge = " <<norm2(diff)<<std::endl;
std:: cout << "--------------------------------------------------------" <<std::endl;
std:: cout << "******************************************************************" <<std::endl;
std:: cout << " Test C: compare in free Field to \"Feynman rule\" " <<std::endl;
std:: cout << "******************************************************************" <<std::endl;
std::vector<int> dim_mask({1,1,1,0}); // 3d FFT
FFT theFFT(&Grid);
Field out(&Grid);
Field F_out(&Grid);
Field F_in(&Grid);
// FFT the random input vector
theFFT.FFT_dim_mask(F_in,in,dim_mask,FFT::forward);
// Convolution theorem: multiply by Fourier representation of (discrete) Laplacian to apply diff op
LatticeComplexD lap(&Grid); lap = Zero();
LatticeComplexD kmu(&Grid);
ComplexD ci(0.0,1.0);
for(int mu=0;mu<3;mu++) {
RealD TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(kmu,mu);
kmu = TwoPiL * kmu;
// (e^ik_mu + e^-ik_mu - 2) = 2( cos kmu - 1) ~ 2 (1 - k_mu^2/2 -1 ) = - k_mu^2 + O(k^4)
lap = lap + 2.0*cos(kmu) - 2.0;
}
F_out = lap * F_in;
// Inverse FFT the result
theFFT.FFT_dim_mask(out,F_out,dim_mask,FFT::backward);
std::cout<<"Fourier xformed (in) "<<norm2(F_in)<<std::endl;
std::cout<<"Fourier xformed Laplacian x (in) "<<norm2(F_out)<<std::endl;
std::cout<<"Momentum space Laplacian application "<< norm2(out)<<std::endl;
std::cout<<"Stencil Laplacian application "<< norm2(out_CLcs)<<std::endl;
diff = out_CLcs - out;
std::cout<<"diff "<< norm2(diff)<<std::endl;
Grid_finalize();
}
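Test C in the deleted example checks the stencil against the free-field Feynman rule: after a 3d FFT, the Cshift Laplacian is diagonal in momentum space, and the factor applied in the loop over mu is just

  \sum_{\mu=1}^{3}\left(e^{ik_\mu}+e^{-ik_\mu}-2\right)
    \;=\; \sum_{\mu=1}^{3} 2\left(\cos k_\mu - 1\right)
    \;=\; -\sum_{\mu=1}^{3} k_\mu^{2} + O(k^{4}),
  \qquad k_\mu = \frac{2\pi n_\mu}{L_\mu},

so multiplying F_in by this factor and transforming back should reproduce the Cshift result on the cold configuration, which is what the final norms and the diff line verify.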


@ -1,127 +0,0 @@
#include <Grid/Grid.h>
using namespace Grid;
// Function used for Chebyshev smearing
//
Real MomentumSmearing(Real p2)
{
return (1 - 4.0*p2) * exp(-p2/4);
}
Real DistillationSmearing(Real p2)
{
if ( p2 > 0.5 ) return 0.0;
else return 1.0;
}
// Flip sign to make prop to p^2, not -p^2 relative to last example
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
GridBase *grid;
GaugeField U;
CovariantLaplacianCshift(GaugeField &_U) :
grid(_U.Grid()),
U(_U) { };
virtual GridBase *Grid(void) { return grid; };
virtual void M (const Field &in, Field &out)
{
out=Zero();
for(int mu=0;mu<Nd-1;mu++) {
GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficient
out = out - Gimpl::CovShiftForward(Umu,mu,in);
out = out - Gimpl::CovShiftBackward(Umu,mu,in);
out = out + 2.0*in;
}
};
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
};
int main(int argc, char ** argv)
{
Grid_init(&argc, &argv);
typedef LatticeColourVector Field;
auto latt_size = GridDefaultLatt();
auto simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
auto mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
GridParallelRNG RNG(&Grid); RNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
LatticeGaugeField U(&Grid);
SU<Nc>::ColdConfiguration(RNG,U);
typedef CovariantLaplacianCshift <PeriodicGimplR,Field> Laplacian_t;
Laplacian_t Laplacian(U);
ColourVector ColourKronecker;
ColourKronecker = Zero();
ColourKronecker()()(0) = 1.0;
Coordinate site({latt_size[0]/2,
latt_size[1]/2,
latt_size[2]/2,
0});
Field kronecker(&Grid);
kronecker = Zero();
pokeSite(ColourKronecker,kronecker,site);
Field psi(&Grid), chi(&Grid);
//////////////////////////////////////
// Classic Wuppertal smearing
//////////////////////////////////////
Integer Iterations = 80;
Real width = 2.0;
Real coeff = (width*width) / Real(4*Iterations);
chi=kronecker;
// chi = (1-p^2/2N)^N kronecker
for(int n = 0; n < Iterations; ++n) {
Laplacian.M(chi,psi);
chi = chi - coeff*psi;
}
std::cout << " Wuppertal smeared operator is chi = \n" << chi <<std::endl;
/////////////////////////////////////
// Chebyshev smearing
/////////////////////////////////////
RealD lo = 0.0;
RealD hi = 12.0; // Analytic free field bound
HermitianLinearOperator<Laplacian_t,Field> HermOp(Laplacian);
std::cout << " Checking spectral range of our POSITIVE definite operator \n";
PowerMethod<Field> PM;
PM(HermOp,kronecker);
// Chebyshev<Field> ChebySmear(lo,hi,20,DistillationSmearing);
Chebyshev<Field> ChebySmear(lo,hi,20,MomentumSmearing);
{
std::ofstream of("chebysmear");
ChebySmear.csv(of);
}
ChebySmear(HermOp,kronecker,chi);
std::cout << " Chebyshev smeared operator is chi = \n" << chi <<std::endl;
Grid_finalize();
}
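Reading off the Wuppertal loop in the deleted example above (a sketch of what the iteration converges to, not a statement from the source): with coeff = width*width/(4*Iterations), the loop applies

  \chi \;=\; \Bigl(1-\frac{w^{2}}{4N}\,\hat{p}^{2}\Bigr)^{N}\,\delta_{x,x_{0}}
  \;\longrightarrow\; e^{-w^{2}\hat{p}^{2}/4}\,\delta_{x,x_{0}}
  \quad (N\to\infty),
  \qquad w=\text{width},\; N=\text{Iterations},

where \hat{p}^{2} is the positive covariant Laplacian applied by Laplacian.M, i.e. a Gaussian profile in the Laplacian eigenvalues. The Chebyshev branch then builds a more general function of the same operator (here the MomentumSmearing profile) from its polynomial expansion over [lo, hi].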


@ -1,129 +0,0 @@
#include <Grid/Grid.h>
using namespace Grid;
template<class Field>
void SimpleConjugateGradient(LinearOperatorBase<Field> &HPDop,const Field &b, Field &x)
{
RealD cp, c, alpha, d, beta, ssq, qq;
RealD Tolerance=1.0e-10;
int MaxIterations=10000;
Field p(b), mmp(b), r(b);
HPDop.HermOpAndNorm(x, mmp, d, beta);
r = b - mmp;
p = r;
cp = alpha = norm2(p);
ssq = norm2(b);
RealD rsq = Tolerance * Tolerance * ssq;
for (int k = 1; k <= MaxIterations; k++) {
c = cp;
HPDop.HermOp(p, mmp);
d = real(innerProduct(p,mmp));
alpha = c / d;
r = r - alpha *mmp;
cp = norm2(r);
beta = cp / c;
x = x + alpha* p ;
p = r + beta* p ;
std::cout << "iteration "<<k<<" cp " <<std::sqrt(cp/ssq) << std::endl;
if (cp <= rsq) {
return;
}
}
assert(0);
}
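SimpleConjugateGradient above is the textbook CG iteration; written out, each pass through the loop performs

  \alpha_k=\frac{(r_k,r_k)}{(p_k,A p_k)},\quad
  x_{k+1}=x_k+\alpha_k p_k,\quad
  r_{k+1}=r_k-\alpha_k A p_k,\quad
  \beta_k=\frac{(r_{k+1},r_{k+1})}{(r_k,r_k)},\quad
  p_{k+1}=r_{k+1}+\beta_k p_k,

and the loop exits when \lVert r\rVert^{2}\le \text{Tolerance}^{2}\,\lVert b\rVert^{2}, which is the cp <= rsq test.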
// Flip sign to make prop to p^2, not -p^2 relative to last example
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
GridBase *grid;
GaugeField U;
RealD m2=1.0e-2;
CovariantLaplacianCshift(GaugeField &_U) :
grid(_U.Grid()),
U(_U) { };
virtual GridBase *Grid(void) { return grid; };
virtual void M (const Field &in, Field &out)
{
out=Zero();
for(int mu=0;mu<Nd-1;mu++) {
GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficient
out = out - Gimpl::CovShiftForward(Umu,mu,in);
out = out - Gimpl::CovShiftBackward(Umu,mu,in);
out = out + 2.0*in + m2*in;
}
};
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
};
int main(int argc, char ** argv)
{
Grid_init(&argc, &argv);
typedef LatticeColourVector Field;
auto latt_size = GridDefaultLatt();
auto simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
auto mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
GridParallelRNG RNG(&Grid); RNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
LatticeGaugeField U(&Grid);
SU<Nc>::ColdConfiguration(RNG,U);
typedef CovariantLaplacianCshift <PeriodicGimplR,Field> Laplacian_t;
Laplacian_t Laplacian(U);
ColourVector ColourKronecker;
ColourKronecker = Zero();
ColourKronecker()()(0) = 1.0;
Coordinate site({0,0,0,0}); // Point source at origin
Field kronecker(&Grid);
kronecker = Zero();
pokeSite(ColourKronecker,kronecker,site);
Field psi(&Grid); psi=Zero();
HermitianLinearOperator<Laplacian_t,Field> HermOp(Laplacian);
SimpleConjugateGradient(HermOp, kronecker,psi);
Field r(&Grid);
Laplacian.M(psi,r);
r=kronecker-r;
std::cout << "True residual "<< norm2(r) <<std::endl;
// Optionally print the result vector
// std::cout << psi<<std::endl;
Grid_finalize();
}


@ -1,314 +0,0 @@
/*
* Warning: This code illustrative only: not well tested, and not meant for production use
* without regression / tests being applied
*/
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
GridBase *grid;
GaugeField U;
CovariantLaplacianCshift(GaugeField &_U) :
grid(_U.Grid()),
U(_U) { };
virtual GridBase *Grid(void) { return grid; };
virtual void M (const Field &in, Field &out)
{
out=Zero();
for(int mu=0;mu<Nd-1;mu++) {
GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficient
out = out - Gimpl::CovShiftForward(Umu,mu,in);
out = out - Gimpl::CovShiftBackward(Umu,mu,in);
out = out + 2.0*in;
}
};
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented need only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented need only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented need only for multigrid
};
void MakePhase(Coordinate mom,LatticeComplex &phase)
{
GridBase *grid = phase.Grid();
auto latt_size = grid->GlobalDimensions();
ComplexD ci(0.0,1.0);
phase=Zero();
LatticeComplex coor(phase.Grid());
for(int mu=0;mu<Nd;mu++){
RealD TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(coor,mu);
phase = phase + (TwoPiL * mom[mu]) * coor;
}
phase = exp(phase*ci);
}
void PointSource(Coordinate &coor,LatticePropagator &source)
{
// Coordinate coor({0,0,0,0});
source=Zero();
SpinColourMatrix kronecker; kronecker=1.0;
pokeSite(kronecker,source,coor);
}
void Z2WallSource(GridParallelRNG &RNG,int tslice,LatticePropagator &source)
{
GridBase *grid = source.Grid();
LatticeComplex noise(grid);
LatticeComplex zz(grid); zz=Zero();
LatticeInteger t(grid);
RealD nrm=1.0/sqrt(2);
bernoulli(RNG, noise); // 0,1 50:50
noise = (2.*noise - Complex(1,1))*nrm;
LatticeCoordinate(t,Tdir);
noise = where(t==Integer(tslice), noise, zz);
source = 1.0;
source = source*noise;
std::cout << " Z2 wall " << norm2(source) << std::endl;
}
template<class Field>
void GaussianSmear(LatticeGaugeField &U,Field &unsmeared,Field &smeared)
{
typedef CovariantLaplacianCshift <PeriodicGimplR,Field> Laplacian_t;
Laplacian_t Laplacian(U);
Integer Iterations = 40;
Real width = 2.0;
Real coeff = (width*width) / Real(4*Iterations);
Field tmp(U.Grid());
smeared=unsmeared;
// chi = (1-p^2/2N)^N kronecker
for(int n = 0; n < Iterations; ++n) {
Laplacian.M(smeared,tmp);
smeared = smeared - coeff*tmp;
std::cout << " smear iter " << n<<" " <<norm2(smeared)<<std::endl;
}
}
void GaussianSource(Coordinate &site,LatticeGaugeField &U,LatticePropagator &source)
{
LatticePropagator tmp(source.Grid());
PointSource(site,source);
std::cout << " GaussianSource Kronecker "<< norm2(source)<<std::endl;
tmp = source;
GaussianSmear(U,tmp,source);
std::cout << " GaussianSource Smeared "<< norm2(source)<<std::endl;
}
void GaussianWallSource(GridParallelRNG &RNG,int tslice,LatticeGaugeField &U,LatticePropagator &source)
{
Z2WallSource(RNG,tslice,source);
auto tmp = source;
GaussianSmear(U,tmp,source);
}
void SequentialSource(int tslice,Coordinate &mom,LatticePropagator &spectator,LatticePropagator &source)
{
assert(mom.size()==Nd);
assert(mom[Tdir] == 0);
GridBase * grid = spectator.Grid();
LatticeInteger ts(grid);
LatticeCoordinate(ts,Tdir);
source = Zero();
source = where(ts==Integer(tslice),spectator,source); // Stick in a slice of the spectator, zero everywhere else
LatticeComplex phase(grid);
MakePhase(mom,phase);
source = source *phase;
}
template<class Action>
void Solve(Action &D,LatticePropagator &source,LatticePropagator &propagator)
{
GridBase *UGrid = D.GaugeGrid();
GridBase *FGrid = D.FermionGrid();
LatticeFermion src4 (UGrid);
LatticeFermion src5 (FGrid);
LatticeFermion result5(FGrid);
LatticeFermion result4(UGrid);
ConjugateGradient<LatticeFermion> CG(1.0e-8,100000);
SchurRedBlackDiagMooeeSolve<LatticeFermion> schur(CG);
ZeroGuesser<LatticeFermion> ZG; // Could be a DeflatedGuesser if have eigenvectors
for(int s=0;s<Nd;s++){
for(int c=0;c<Nc;c++){
PropToFerm<Action>(src4,source,s,c);
D.ImportPhysicalFermionSource(src4,src5);
result5=Zero();
schur(D,src5,result5,ZG);
std::cout<<GridLogMessage
<<"spin "<<s<<" color "<<c
<<" norm2(src5d) " <<norm2(src5)
<<" norm2(result5d) "<<norm2(result5)<<std::endl;
D.ExportPhysicalFermionSolution(result5,result4);
FermToProp<Action>(propagator,result4,s,c);
}
}
}
class MesonFile: Serializable {
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(MesonFile, std::vector<std::vector<Complex> >, data);
};
void MesonTrace(std::string file,LatticePropagator &q1,LatticePropagator &q2,LatticeComplex &phase)
{
const int nchannel=4;
Gamma::Algebra Gammas[nchannel][2] = {
{Gamma::Algebra::Gamma5 ,Gamma::Algebra::Gamma5},
{Gamma::Algebra::GammaTGamma5,Gamma::Algebra::GammaTGamma5},
{Gamma::Algebra::GammaTGamma5,Gamma::Algebra::Gamma5},
{Gamma::Algebra::Gamma5 ,Gamma::Algebra::GammaTGamma5}
};
Gamma G5(Gamma::Algebra::Gamma5);
LatticeComplex meson_CF(q1.Grid());
MesonFile MF;
for(int ch=0;ch<nchannel;ch++){
Gamma Gsrc(Gammas[ch][0]);
Gamma Gsnk(Gammas[ch][1]);
meson_CF = trace(G5*adj(q1)*G5*Gsnk*q2*adj(Gsrc));
std::vector<TComplex> meson_T;
sliceSum(meson_CF,meson_T, Tdir);
int nt=meson_T.size();
std::vector<Complex> corr(nt);
for(int t=0;t<nt;t++){
corr[t] = TensorRemove(meson_T[t]); // Yes this is ugly, not figured a work around
std::cout << " channel "<<ch<<" t "<<t<<" " <<corr[t]<<std::endl;
}
MF.data.push_back(corr);
}
{
XmlWriter WR(file);
write(WR,"MesonFile",MF);
}
}
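A note on the contraction in MesonTrace (the identity is standard gamma5-hermiticity; the labelling below simply mirrors the code): with S_1 = q1 and S_2 = q2 both propagators from the source point,

  C(t)=\sum_{\vec x}\,\mathrm{Tr}\bigl[\gamma_5\,S_1^{\dagger}(x,0)\,\gamma_5\;\Gamma_{\rm snk}\,S_2(x,0)\,\Gamma_{\rm src}^{\dagger}\bigr],
  \qquad \gamma_5\,S^{\dagger}(x,0)\,\gamma_5 = S(0,x),

so only forward propagators are needed; the four (Gsrc, Gsnk) pairs in the Gammas table give the pseudoscalar, temporal-axial, and mixed channels.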
int main (int argc, char ** argv)
{
const int Ls=8;
Grid_init(&argc,&argv);
// Double precision grids
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
//////////////////////////////////////////////////////////////////////
// You can manage seeds however you like.
// Recommend SeedUniqueString.
//////////////////////////////////////////////////////////////////////
std::vector<int> seeds4({1,2,3,4});
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
LatticeGaugeField Umu(UGrid);
std::string config;
if( argc > 1 && argv[1][0] != '-' )
{
std::cout<<GridLogMessage <<"Loading configuration from "<<argv[1]<<std::endl;
FieldMetaData header;
NerscIO::readConfiguration(Umu, header, argv[1]);
config=argv[1];
}
else
{
std::cout<<GridLogMessage <<"Using hot configuration"<<std::endl;
SU<Nc>::ColdConfiguration(Umu);
// SU<Nc>::HotConfiguration(RNG4,Umu);
config="HotConfig";
}
std::vector<RealD> masses({ 0.03,0.04,0.45} ); // u/d, s, c ??
int nmass = masses.size();
std::vector<MobiusFermionR *> FermActs;
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"MobiusFermion action as Scaled Shamir kernel"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
for(auto mass: masses) {
RealD M5=1.0;
RealD b=1.5;// Scale factor b+c=2, b-c=1
RealD c=0.5;
FermActs.push_back(new MobiusFermionR(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c));
}
LatticePropagator point_source(UGrid);
LatticePropagator wall_source(UGrid);
LatticePropagator gaussian_source(UGrid);
Coordinate Origin({0,0,0,0});
PointSource (Origin,point_source);
Z2WallSource (RNG4,0,wall_source);
GaussianSource(Origin,Umu,gaussian_source);
std::vector<LatticePropagator> PointProps(nmass,UGrid);
std::vector<LatticePropagator> GaussProps(nmass,UGrid);
std::vector<LatticePropagator> Z2Props (nmass,UGrid);
for(int m=0;m<nmass;m++) {
Solve(*FermActs[m],point_source ,PointProps[m]);
Solve(*FermActs[m],gaussian_source,GaussProps[m]);
Solve(*FermActs[m],wall_source ,Z2Props[m]);
}
LatticeComplex phase(UGrid);
Coordinate mom({0,0,0,0});
MakePhase(mom,phase);
for(int m1=0 ;m1<nmass;m1++) {
for(int m2=m1;m2<nmass;m2++) {
std::stringstream ssp,ssg,ssz;
ssp<<config<< "_m" << m1 << "_m"<< m2 << "_point_meson.xml";
ssg<<config<< "_m" << m1 << "_m"<< m2 << "_smeared_meson.xml";
ssz<<config<< "_m" << m1 << "_m"<< m2 << "_wall_meson.xml";
MesonTrace(ssp.str(),PointProps[m1],PointProps[m2],phase);
MesonTrace(ssg.str(),GaussProps[m1],GaussProps[m2],phase);
MesonTrace(ssz.str(),Z2Props[m1],Z2Props[m2],phase);
}}
Grid_finalize();
}


@ -1,6 +0,0 @@
SUBDIRS = .
include Make.inc


@ -82,18 +82,3 @@ for f in $TESTS; do
echo >> Make.inc
done
cd ..
# examples Make.inc
cd $home/examples/
echo> Make.inc
TESTS=`ls *.cc`
TESTLIST=`echo ${TESTS} | sed s/.cc//g `
echo bin_PROGRAMS = ${TESTLIST} > Make.inc
echo >> Make.inc
for f in $TESTS; do
BNAME=`basename $f .cc`
echo ${BNAME}_SOURCES=$f >> Make.inc
echo ${BNAME}_LDADD='$(top_builddir)/Grid/libGrid.a'>> Make.inc
echo >> Make.inc
done
cd ..


@ -1,73 +0,0 @@
#Example script
DIR=/gpfs/alpine/phy157/proj-shared/phy157dwf/chulwoo/Grid/BL/build/tests/lanczos
BIN=${DIR}/Test_dwf_block_lanczos
VOL='--grid 16.16.16.32 '
GRID='--mpi 1.1.1.4 '
CONF='--gconf ckpoint_lat.IEEE64BIG.2000 '
OPT='--mass 0.01 --M5 1.8 --phase in.params --omega in.params --shm 4096'
#BL='--rbl 16.1024.128.1000.10 --split 1.1.4.4 --check_int 100 --resid 1.0e-5 --cheby_l 0.007 --cheby_u 7 --cheby_n 51'
BL='--rbl 4.128.16.100.10 --split 1.1.1.4 --check_int 25 --resid 1.0e-5 --cheby_l 0.007 --cheby_u 7 --cheby_n 51'
ARGS=${CONF}" "${OPT}" "${BL}" "${VOL}" "${GRID}
export APP="${BIN} ${ARGS}"
echo APP=${APP}
#export JS="jsrun --nrs 32 -a4 -g4 -c42 -dpacked -b packed:7 --smpiargs="-gpu" "
export JS="jsrun --nrs 1 -a4 -g4 -c42 -dpacked -b packed:10 --smpiargs="-gpu" "
$JS $APP
#sample in.param
boundary_phase 0 1 0
boundary_phase 1 1 0
boundary_phase 2 1 0
boundary_phase 3 -1 0
omega 0 0.5 0
omega 1 0.5 0
omega 2 0.5 0
omega 3 0.5 0
omega 4 0.5 0
omega 5 0.5 0
omega 6 0.5 0
omega 7 0.5 0
omega 8 0.5 0
omega 9 0.5 0
omega 10 0.5 0
omega 11 0.5 0
#output
Grid : Message : 1.717474 s : Gauge Configuration ckpoint_lat.IEEE64BIG.2000
Grid : Message : 1.717478 s : boundary_phase[0] = (1,0)
Grid : Message : 1.717497 s : boundary_phase[1] = (1,0)
Grid : Message : 1.717500 s : boundary_phase[2] = (1,0)
Grid : Message : 1.717503 s : boundary_phase[3] = (-1,0)
Grid : Message : 1.717506 s : Ls 12
Grid : Message : 1.717507 s : mass 0.01
Grid : Message : 1.717510 s : M5 1.8
Grid : Message : 1.717512 s : mob_b 1.5
Grid : Message : 1.717514 s : omega[0] = (0.5,0)
Grid : Message : 1.717517 s : omega[1] = (0.5,0)
Grid : Message : 1.717520 s : omega[2] = (0.5,0)
Grid : Message : 1.717523 s : omega[3] = (0.5,0)
Grid : Message : 1.717526 s : omega[4] = (0.5,0)
Grid : Message : 1.717529 s : omega[5] = (0.5,0)
Grid : Message : 1.717532 s : omega[6] = (0.5,0)
Grid : Message : 1.717535 s : omega[7] = (0.5,0)
Grid : Message : 1.717538 s : omega[8] = (0.5,0)
Grid : Message : 1.717541 s : omega[9] = (0.5,0)
Grid : Message : 1.717544 s : omega[10] = (0.5,0)
Grid : Message : 1.717547 s : omega[11] = (0.5,0)
Grid : Message : 1.717550 s : Nu 4
Grid : Message : 1.717551 s : Nk 128
Grid : Message : 1.717552 s : Np 16
Grid : Message : 1.717553 s : Nm 288
Grid : Message : 1.717554 s : Nstop 100
Grid : Message : 1.717555 s : Ntest 25
Grid : Message : 1.717557 s : MaxIter 10
Grid : Message : 1.717558 s : resid 1e-05
Grid : Message : 1.717560 s : Cheby Poly 0.007,7,51


@ -1,403 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_block_lanczos.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/util/Init.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczos.h>
using namespace std;
using namespace Grid;
//using namespace Grid::QCD;
//typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename ZMobiusFermionF::FermionField FermionField;
RealD AllZero(RealD x){ return 0.;}
class CmdJobParams
{
public:
std::string gaugefile;
int Ls;
double mass;
double M5;
double mob_b;
std::vector<ComplexD> omega;
std::vector<Complex> boundary_phase;
std::vector<int> mpi_split;
LanczosType Impl;
int Nu;
int Nk;
int Np;
int Nm;
int Nstop;
int Ntest;
int MaxIter;
double resid;
double low;
double high;
int order;
CmdJobParams()
: gaugefile("Hot"),
Ls(8), mass(0.01), M5(1.8), mob_b(1.5),
Impl(LanczosType::irbl),mpi_split(4,1),
Nu(4), Nk(200), Np(200), Nstop(100), Ntest(1), MaxIter(10), resid(1.0e-8),
low(0.2), high(5.5), order(11)
{Nm=Nk+Np;};
void Parse(char **argv, int argc);
};
void CmdJobParams::Parse(char **argv,int argc)
{
std::string arg;
std::vector<int> vi;
double re,im;
int expect, idx;
std::string vstr;
std::ifstream pfile;
if( GridCmdOptionExists(argv,argv+argc,"--gconf") ){
gaugefile = GridCmdOptionPayload(argv,argv+argc,"--gconf");
}
if( GridCmdOptionExists(argv,argv+argc,"--phase") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--phase");
pfile.open(arg);
assert(pfile);
expect = 0;
while( pfile >> vstr ) {
if ( vstr.compare("boundary_phase") == 0 ) {
pfile >> vstr;
GridCmdOptionInt(vstr,idx);
assert(expect==idx);
pfile >> vstr;
GridCmdOptionFloat(vstr,re);
pfile >> vstr;
GridCmdOptionFloat(vstr,im);
boundary_phase.push_back({re,im});
expect++;
}
}
pfile.close();
} else {
for (int i=0; i<4; ++i) boundary_phase.push_back({1.,0.});
}
if( GridCmdOptionExists(argv,argv+argc,"--omega") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--omega");
pfile.open(arg);
assert(pfile);
Ls = 0;
while( pfile >> vstr ) {
if ( vstr.compare("omega") == 0 ) {
pfile >> vstr;
GridCmdOptionInt(vstr,idx);
assert(Ls==idx);
pfile >> vstr;
GridCmdOptionFloat(vstr,re);
pfile >> vstr;
GridCmdOptionFloat(vstr,im);
omega.push_back({re,im});
Ls++;
}
}
pfile.close();
} else {
if( GridCmdOptionExists(argv,argv+argc,"--Ls") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--Ls");
GridCmdOptionInt(arg,Ls);
}
}
if( GridCmdOptionExists(argv,argv+argc,"--mass") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--mass");
GridCmdOptionFloat(arg,mass);
}
if( GridCmdOptionExists(argv,argv+argc,"--M5") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--M5");
GridCmdOptionFloat(arg,M5);
}
if( GridCmdOptionExists(argv,argv+argc,"--mob_b") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--mob_b");
GridCmdOptionFloat(arg,mob_b);
}
if( GridCmdOptionExists(argv,argv+argc,"--irbl") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--irbl");
GridCmdOptionIntVector(arg,vi);
Nu = vi[0];
Nk = vi[1];
Np = vi[2];
Nstop = vi[3];
MaxIter = vi[4];
// ypj[fixme] a message about the mode override is needed.
Impl = LanczosType::irbl;
Nm = Nk+Np;
}
// block Lanczos with explicit extension of its dimensions
if( GridCmdOptionExists(argv,argv+argc,"--rbl") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--rbl");
GridCmdOptionIntVector(arg,vi);
Nu = vi[0];
Nk = vi[1];
Np = vi[2]; // vector space is enlarged by adding Np vectors
Nstop = vi[3];
MaxIter = vi[4];
// ypj[fixme] a message about the mode override is needed.
Impl = LanczosType::rbl;
Nm = Nk+Np*MaxIter;
}
#if 1
// block Lanczos with explicit extension of its dimensions
if( GridCmdOptionExists(argv,argv+argc,"--split") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--split");
GridCmdOptionIntVector(arg,vi);
for(int i=0;i<mpi_split.size();i++)
mpi_split[i] = vi[i];
}
#endif
if( GridCmdOptionExists(argv,argv+argc,"--check_int") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--check_int");
GridCmdOptionInt(arg,Ntest);
}
if( GridCmdOptionExists(argv,argv+argc,"--resid") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--resid");
GridCmdOptionFloat(arg,resid);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_l") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_l");
GridCmdOptionFloat(arg,low);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_u") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_u");
GridCmdOptionFloat(arg,high);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_n") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_n");
GridCmdOptionInt(arg,order);
}
if ( CartesianCommunicator::RankWorld() == 0 ) {
std::streamsize ss = std::cout.precision();
std::cout << GridLogMessage <<" Gauge Configuration "<< gaugefile << '\n';
std::cout.precision(15);
for ( int i=0; i<4; ++i ) std::cout << GridLogMessage <<" boundary_phase["<< i << "] = " << boundary_phase[i] << '\n';
std::cout.precision(ss);
std::cout << GridLogMessage <<" Ls "<< Ls << '\n';
std::cout << GridLogMessage <<" mass "<< mass << '\n';
std::cout << GridLogMessage <<" M5 "<< M5 << '\n';
std::cout << GridLogMessage <<" mob_b "<< mob_b << '\n';
std::cout.precision(15);
for ( int i=0; i<Ls; ++i ) std::cout << GridLogMessage <<" omega["<< i << "] = " << omega[i] << '\n';
std::cout.precision(ss);
std::cout << GridLogMessage <<" Nu "<< Nu << '\n';
std::cout << GridLogMessage <<" Nk "<< Nk << '\n';
std::cout << GridLogMessage <<" Np "<< Np << '\n';
std::cout << GridLogMessage <<" Nm "<< Nm << '\n';
std::cout << GridLogMessage <<" Nstop "<< Nstop << '\n';
std::cout << GridLogMessage <<" Ntest "<< Ntest << '\n';
std::cout << GridLogMessage <<" MaxIter "<< MaxIter << '\n';
std::cout << GridLogMessage <<" resid "<< resid << '\n';
std::cout << GridLogMessage <<" Cheby Poly "<< low << "," << high << "," << order << std::endl;
}
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
CmdJobParams JP;
JP.Parse(argv,argc);
GridCartesian * UGridD = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexF::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(JP.Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(JP.Ls,UGrid);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
// ypj [note] why seed RNG5 again? bug? In this case, run with a default seed().
GridParallelRNG RNG5rb(FrbGrid); RNG5rb.SeedFixedIntegers(seeds5);
LatticeGaugeField UmuD(UGridD);
LatticeGaugeFieldF Umu(UGrid);
std::vector<LatticeColourMatrixF> U(4,UGrid);
if ( JP.gaugefile.compare("Hot") == 0 ) {
SU3::HotConfiguration(RNG4, UmuD);
} else {
FieldMetaData header;
NerscIO::readConfiguration(UmuD,header,JP.gaugefile);
// ypj [fixme] additional checks for the loaded configuration?
}
precisionChange(Umu,UmuD);
for(int mu=0;mu<Nd;mu++){
U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
}
RealD mass = JP.mass;
RealD M5 = JP.M5;
// ypj [fixme] flexible support for various Fermions
// RealD mob_b = JP.mob_b; // Gparity
// std::vector<ComplexD> omega; // ZMobius
// GparityMobiusFermionD ::ImplParams params;
// std::vector<int> twists({1,1,1,0});
// params.twists = twists;
// GparityMobiusFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,mob_b,mob_b-1.,params);
// SchurDiagTwoOperator<GparityMobiusFermionR,FermionField> HermOp(Ddwf);
// int mrhs = JP.Nu;
int Ndir=4;
auto mpi_layout = GridDefaultMpi();
std::vector<int> mpi_split (Ndir,1);
#if 0
int tmp=mrhs, dir=0;
std::cout << GridLogMessage << "dir= "<<dir <<"tmp= "<<tmp<<"mpi_split= "<<mpi_split[dir]<<"mpi_layout= "<<mpi_split[dir]<<std::endl;
while ( tmp> 1) {
if ((mpi_split[dir]*2) <= mpi_layout[dir]){
mpi_split[dir] *=2;
tmp = tmp/2;
}
std::cout << GridLogMessage << "dir= "<<dir <<"tmp= "<<tmp<<"mpi_split= "<<mpi_split[dir]<<"mpi_layout= "<<mpi_layout[dir]<<std::endl;
dir = (dir+1)%Ndir;
}
#endif
int mrhs=1;
for(int i =0;i<Ndir;i++){
mpi_split[i] = mpi_layout[i] / JP.mpi_split[i] ;
mrhs *= JP.mpi_split[i];
}
std::cout << GridLogMessage << "mpi_layout= " << mpi_layout << std::endl;
std::cout << GridLogMessage << "mpi_split= " << mpi_split << std::endl;
std::cout << GridLogMessage << "mrhs= " << mrhs << std::endl;
// assert(JP.Nu==tmp);
/////////////////////////////////////////////
// Split into 1^4 mpi communicators
/////////////////////////////////////////////
GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplexF::Nsimd()),
mpi_split,
*UGrid);
GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(JP.Ls,SGrid);
GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(JP.Ls,SGrid);
LatticeGaugeFieldF s_Umu(SGrid);
Grid_split (Umu,s_Umu);
//WilsonFermionR::ImplParams params;
ZMobiusFermionF::ImplParams params;
params.overlapCommsCompute = true;
params.boundary_phases = JP.boundary_phase;
ZMobiusFermionF Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,JP.omega,1.,0.,params);
// SchurDiagTwoOperator<ZMobiusFermionR,FermionField> HermOp(Ddwf);
SchurDiagOneOperator<ZMobiusFermionF,FermionField> HermOp(Ddwf);
ZMobiusFermionF Dsplit(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5,JP.omega,1.,0.,params);
// SchurDiagTwoOperator<ZMobiusFermionR,FermionField> SHermOp(Dsplit);
SchurDiagOneOperator<ZMobiusFermionF,FermionField> SHermOp(Dsplit);
//std::vector<double> Coeffs { 0.,-1.};
// ypj [note] this may not be supported by some compilers
std::vector<double> Coeffs({ 0.,-1.});
Polynomial<FermionField> PolyX(Coeffs);
//Chebyshev<FermionField> Cheb(0.2,5.5,11);
Chebyshev<FermionField> Cheb(JP.low,JP.high,JP.order);
// Cheb.csv(std::cout);
ImplicitlyRestartedBlockLanczos<FermionField> IRBL(HermOp, SHermOp,
FrbGrid,SFrbGrid,mrhs,
Cheb,
JP.Nstop, JP.Ntest,
JP.Nu, JP.Nk, JP.Nm,
JP.resid,
JP.MaxIter,
IRBLdiagonaliseWithEigen);
// IRBLdiagonaliseWithLAPACK);
IRBL.split_test=0;
std::vector<RealD> eval(JP.Nm);
std::vector<FermionField> src(JP.Nu,FrbGrid);
if (0)
{
// in case RNG is too slow
std::cout << GridLogMessage << "Using RNG5"<<std::endl;
FermionField src_tmp(FGrid);
for ( int i=0; i<JP.Nu; ++i ){
// gaussian(RNG5,src_tmp);
ComplexD rnd;
RealD re;
fillScalar(re,RNG5._gaussian[0],RNG5._generators[0]);
std::cout << i <<" / "<< JP.Nm <<" re "<< re << std::endl;
// printf("%d / %d re %e\n",i,FGrid->_processor,re);
src_tmp=re;
pickCheckerboard(Odd,src[i],src_tmp);
}
RNG5.Report();
} else {
std::cout << GridLogMessage << "Using RNG5rb"<<std::endl;
for ( int i=0; i<JP.Nu; ++i )
gaussian(RNG5rb,src[i]);
RNG5rb.Report();
}
std::vector<FermionField> evec(JP.Nm,FrbGrid);
for(int i=0;i<1;++i){
std::cout << GridLogMessage << i <<" / "<< JP.Nm <<" grid pointer "<< evec[i].Grid() << std::endl;
};
int Nconv;
IRBL.calc(eval,evec,src,Nconv,JP.Impl);
Grid_finalize();
}
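
The --split handling in the test above divides the MPI layout given by --mpi into sub-communicators: JP.mpi_split holds the split factor per direction, mpi_split[i] = mpi_layout[i] / JP.mpi_split[i] is the layout of each sub-grid, and mrhs (the number of right-hand sides the split solver handles at once) is the product of the split factors. A worked example of that arithmetic for the Cori script below (--mpi 1.2.2.4, --split 1.2.2.1), with the layouts hard-coded purely for illustration:

#include <iostream>
#include <vector>

int main()
{
  const int Ndir = 4;
  std::vector<int> mpi_layout = {1, 2, 2, 4};   // from --mpi 1.2.2.4
  std::vector<int> jp_split   = {1, 2, 2, 1};   // from --split 1.2.2.1
  std::vector<int> mpi_split(Ndir, 1);
  int mrhs = 1;
  for (int i = 0; i < Ndir; i++) {
    mpi_split[i] = mpi_layout[i] / jp_split[i];  // MPI layout of each sub-communicator
    mrhs        *= jp_split[i];                  // number of sub-grids
  }
  std::cout << "sub-grid layout = " << mpi_split[0] << "." << mpi_split[1] << "."
            << mpi_split[2] << "." << mpi_split[3] << ", mrhs = " << mrhs << std::endl;
  return 0;
}

This gives a 1.1.1.4 sub-grid layout and mrhs = 4, matching Nu = 4 in the --rbl option of that script.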

View File

@ -1,27 +0,0 @@
# run script on Cori
#!/bin/bash
#SBATCH -t 30
#SBATCH -N 16
#SBATCH -C knl
#SBATCH -A mp13
#SBATCH --ntasks-per-node=1
export OMP_PROC_BIND=true
export OMP_PLACES=threads
export OMP_NUM_THREADS=64
#BIN=benchmarks/Benchmark_ITT
#OPT='--cpu-bind=cores'
BIN=./Test_dwf_block_lanczos
rundir=.
CONF='--gconf '${rundir}'/ckpoint_lat.IEEE64BIG.5000'
VOL='--grid 16.16.16.32'
GRID='--mpi 1.2.2.4'
OPT='--mass 0.00054 --M5 1.8 --phase in.params --omega in.params --shm 4096'
BL='--rbl 4.32.32.30.7 --split 1.2.2.1 --check_int 8 --resid 1.0e-5 --cheby_l 0.0027 --cheby_u 7 --cheby_n 51'
#echo srun $OPT $BIN $gridoptions $jobparams
#srun $OPT $BIN $gridoptions $jobparams
echo srun $BIN $CONF $OPT $BL $VOL $GRID
srun $BIN $CONF $OPT $BL $VOL $GRID

View File

@ -1,17 +0,0 @@
boundary_phase 0 1 0
boundary_phase 1 1 0
boundary_phase 2 1 0
boundary_phase 3 -1 0
omega 0 0.375 0
omega 1 0.375 0
omega 2 0.375 0
omega 3 0.375 0
omega 4 0.375 0
omega 5 0.375 0
omega 6 0.375 0
omega 7 0.375 0
omega 8 0.375 0
omega 9 0.375 0
omega 10 0.375 0
omega 11 0.375 0
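
CmdJobParams::Parse in the test above infers Ls from a file like this by counting the omega lines and checking that their indices run consecutively from 0, so this file implies Ls = 12 with all omega entries equal to 0.375 and boundary phases that are periodic in space and antiperiodic in time. A stripped-down standalone sketch of that loop (it assumes the file above is saved as "in.params" and does not use Grid's GridCmdOption helpers):

#include <cassert>
#include <complex>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

int main()
{
  std::ifstream pfile("in.params");
  std::vector<std::complex<double>> omega;
  std::string key; int idx; double re, im;
  int Ls = 0;
  while (pfile >> key >> idx >> re >> im) {
    if (key == "omega") {
      assert(idx == Ls);          // omega indices must be consecutive from 0
      omega.push_back({re, im});
      Ls++;
    }
  }
  std::cout << "Ls = " << Ls << std::endl;   // 12 for the file above
  return 0;
}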

View File

@ -33,6 +33,7 @@ namespace Grid{
GRID_SERIALIZABLE_CLASS_MEMBERS(WFParameters,
int, steps,
double, step_size,
double, tol,
int, meas_interval,
double, maxTau); // for the adaptive algorithm
@ -82,13 +83,27 @@ int main(int argc, char **argv) {
SU<Nc>::HotConfiguration(pRNG, Umu);
typedef Grid::XmlReader Serialiser;
Serialiser Reader("input.xml");
WFParameters WFPar(Reader);
ConfParameters CPar(Reader);
CheckpointerParameters CPPar(CPar.conf_prefix, CPar.rng_prefix);
// Serialiser Reader("input.xml");
// WFParameters WFPar(Reader);
// ConfParameters CPar(Reader);
// WFParameters WFPar;
int steps = 800;
double step_size=0.02;
double tol=1e-4;
int meas_interval=50;
double maxTau = 16;
// ConfParameters CPar;
// CPar. conf_prefix="configurations/ckpoint_lat";
// CPar. rng_prefix="rngs/ckpoint_rng";
// CPar. StartConfiguration=100,
// CPar. EndConfiguration=110,
// CPar. Skip=1;
// CheckpointerParameters CPPar(CPar.conf_prefix, CPar.rng_prefix);
CheckpointerParameters CPPar("configurations/ckpoint_lat","rngs/ckpoint_rng");
BinaryHmcCheckpointer<PeriodicGimplR> CPBin(CPPar);
for (int conf = CPar.StartConfiguration; conf <= CPar.EndConfiguration; conf+= CPar.Skip){
// for (int conf = CPar.StartConfiguration; conf <= CPar.EndConfiguration; conf+= CPar.Skip){
for (int conf = 100; conf <= 110; conf+= 1){
CPBin.CheckpointRestore(conf, Umu, sRNG, pRNG);
@ -96,9 +111,10 @@ int main(int argc, char **argv) {
std::cout << GridLogMessage << "Initial plaquette: "
<< WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu) << std::endl;
WilsonFlow<PeriodicGimplR> WF(WFPar.steps, WFPar.step_size, WFPar.meas_interval);
WilsonFlow<PeriodicGimplR> WF(steps, step_size, meas_interval);
WF.smear_adaptive(Uflow, Umu, WFPar.maxTau);
// WF.smear_adaptive(Uflow, Umu, maxTau);
WF.smear(Uflow, Umu);
RealD WFlow_plaq = WilsonLoops<PeriodicGimplR>::avgPlaquette(Uflow);
RealD WFlow_TC = WilsonLoops<PeriodicGimplR>::TopologicalCharge(Uflow);
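
With the hard-coded values in this hunk, the fixed-step flow reaches the same total flow time that the commented-out adaptive call would target:

  steps * step_size = 800 * 0.02 = 16 = maxTau

The new tol entry in WFParameters (and the local tol = 1e-4 above) is presumably the step-size tolerance for the adaptive integrator; neither is used in this hunk, since the test now calls the fixed-step WF.smear rather than WF.smear_adaptive.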