mirror of https://github.com/paboyle/Grid.git synced 2025-06-13 20:57:06 +01:00

Merge branch 'feature/dirichlet' of https://github.com/paboyle/Grid into feature/dirichlet

Peter Boyle
2023-03-21 16:04:27 -04:00
19 changed files with 377 additions and 93 deletions

View File

@@ -81,6 +81,7 @@ public:
using OperatorFunction<FieldD>::operator();
RealD Tolerance;
Integer MaxIterationsMshift;
Integer MaxIterations;
Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
std::vector<int> IterationsToCompleteShift; // Iterations for this shift
@@ -95,9 +96,9 @@ public:
ConjugateGradientMultiShiftMixedPrec(Integer maxit, const MultiShiftFunction &_shifts,
GridBase* _SinglePrecGrid, LinearOperatorBase<FieldF> &_Linop_f,
int _ReliableUpdateFreq
) :
MaxIterations(maxit), shifts(_shifts), SinglePrecGrid(_SinglePrecGrid), Linop_f(_Linop_f), ReliableUpdateFreq(_ReliableUpdateFreq)
int _ReliableUpdateFreq) :
MaxIterationsMshift(maxit), shifts(_shifts), SinglePrecGrid(_SinglePrecGrid), Linop_f(_Linop_f), ReliableUpdateFreq(_ReliableUpdateFreq),
MaxIterations(20000)
{
verbose=1;
IterationsToCompleteShift.resize(_shifts.order);
@@ -244,7 +245,7 @@ public:
// Iteration loop
int k;
for (k=1;k<=MaxIterations;k++){
for (k=1;k<=MaxIterationsMshift;k++){
a = c /cp;
AXPYTimer.Start();
@@ -350,12 +351,17 @@ public:
}
}
if ( all_converged ){
if ( all_converged || k == MaxIterationsMshift-1){
SolverTimer.Stop();
std::cout<<GridLogMessage<< "ConjugateGradientMultiShiftMixedPrec: All shifts have converged iteration "<<k<<std::endl;
std::cout<<GridLogMessage<< "ConjugateGradientMultiShiftMixedPrec: Checking solutions"<<std::endl;
if ( all_converged ){
std::cout<<GridLogMessage<< "ConjugateGradientMultiShiftMixedPrec: All shifts have converged iteration "<<k<<std::endl;
std::cout<<GridLogMessage<< "ConjugateGradientMultiShiftMixedPrec: Checking solutions"<<std::endl;
} else {
std::cout<<GridLogMessage<< "ConjugateGradientMultiShiftMixedPrec: Not all shifts have converged iteration "<<k<<std::endl;
}
// Check answers
for(int s=0; s < nshift; s++) {
Linop_d.HermOpAndNorm(psi_d[s],mmp_d,d,qq);
@@ -396,12 +402,10 @@ public:
return;
}
}
// ugly hack
std::cout<<GridLogMessage<<"CG multi shift did not converge"<<std::endl;
// assert(0);
assert(0);
}
};
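For orientation, a minimal standalone sketch (plain C++, not Grid code; the shift count, the iteration cap, and the fake convergence step are made-up) of the termination logic this file's hunks introduce: the iteration loop now runs to MaxIterationsMshift rather than MaxIterations, the per-shift solution check also runs when the iteration cap is about to be reached, and the previously commented-out assert after the loop is re-enabled as a safety net.

#include <cassert>
#include <iostream>
#include <vector>

int main() {
  const int MaxIterationsMshift = 8;                   // hypothetical cap (maxit in the constructor)
  std::vector<bool> converged = {false, false, false}; // one flag per shift

  for (int k = 1; k <= MaxIterationsMshift; k++) {
    // ... one multishift CG update per shift would go here ...
    if (k == 5) converged = {true, true, true};        // pretend every shift converges at k==5

    bool all_converged = true;
    for (bool c : converged) all_converged = all_converged && c;

    if (all_converged || k == MaxIterationsMshift - 1) {
      if (all_converged)
        std::cout << "All shifts have converged iteration " << k << std::endl;
      else
        std::cout << "Not all shifts have converged iteration " << k << std::endl;
      // the real solver re-checks the true residual of every shift here, then returns
      return 0;
    }
  }
  // only reachable if the loop somehow exits without returning
  std::cout << "CG multi shift did not converge" << std::endl;
  assert(0);
}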

View File

@@ -29,6 +29,7 @@ Author: Christoph Lehner <christoph@lhnr.de>
#include <Grid/GridCore.h>
#include <pwd.h>
#include <syscall.h>
#ifdef GRID_CUDA
#include <cuda_runtime_api.h>

View File

@@ -484,24 +484,26 @@ public:
int dag = compress.dag;
int face_idx=0;
#define vet_same_node(a,b) \
{ auto tmp = b; }
if ( dag ) {
assert(this->same_node[Xp]==this->HaloGatherDir(source,XpCompress,Xp,face_idx));
assert(this->same_node[Yp]==this->HaloGatherDir(source,YpCompress,Yp,face_idx));
assert(this->same_node[Zp]==this->HaloGatherDir(source,ZpCompress,Zp,face_idx));
assert(this->same_node[Tp]==this->HaloGatherDir(source,TpCompress,Tp,face_idx));
assert(this->same_node[Xm]==this->HaloGatherDir(source,XmCompress,Xm,face_idx));
assert(this->same_node[Ym]==this->HaloGatherDir(source,YmCompress,Ym,face_idx));
assert(this->same_node[Zm]==this->HaloGatherDir(source,ZmCompress,Zm,face_idx));
assert(this->same_node[Tm]==this->HaloGatherDir(source,TmCompress,Tm,face_idx));
vet_same_node(this->same_node[Xp],this->HaloGatherDir(source,XpCompress,Xp,face_idx));
vet_same_node(this->same_node[Yp],this->HaloGatherDir(source,YpCompress,Yp,face_idx));
vet_same_node(this->same_node[Zp],this->HaloGatherDir(source,ZpCompress,Zp,face_idx));
vet_same_node(this->same_node[Tp],this->HaloGatherDir(source,TpCompress,Tp,face_idx));
vet_same_node(this->same_node[Xm],this->HaloGatherDir(source,XmCompress,Xm,face_idx));
vet_same_node(this->same_node[Ym],this->HaloGatherDir(source,YmCompress,Ym,face_idx));
vet_same_node(this->same_node[Zm],this->HaloGatherDir(source,ZmCompress,Zm,face_idx));
vet_same_node(this->same_node[Tm],this->HaloGatherDir(source,TmCompress,Tm,face_idx));
} else {
assert(this->same_node[Xp]==this->HaloGatherDir(source,XmCompress,Xp,face_idx));
assert(this->same_node[Yp]==this->HaloGatherDir(source,YmCompress,Yp,face_idx));
assert(this->same_node[Zp]==this->HaloGatherDir(source,ZmCompress,Zp,face_idx));
assert(this->same_node[Tp]==this->HaloGatherDir(source,TmCompress,Tp,face_idx));
assert(this->same_node[Xm]==this->HaloGatherDir(source,XpCompress,Xm,face_idx));
assert(this->same_node[Ym]==this->HaloGatherDir(source,YpCompress,Ym,face_idx));
assert(this->same_node[Zm]==this->HaloGatherDir(source,ZpCompress,Zm,face_idx));
assert(this->same_node[Tm]==this->HaloGatherDir(source,TpCompress,Tm,face_idx));
vet_same_node(this->same_node[Xp],this->HaloGatherDir(source,XmCompress,Xp,face_idx));
vet_same_node(this->same_node[Yp],this->HaloGatherDir(source,YmCompress,Yp,face_idx));
vet_same_node(this->same_node[Zp],this->HaloGatherDir(source,ZmCompress,Zp,face_idx));
vet_same_node(this->same_node[Tp],this->HaloGatherDir(source,TmCompress,Tp,face_idx));
vet_same_node(this->same_node[Xm],this->HaloGatherDir(source,XpCompress,Xm,face_idx));
vet_same_node(this->same_node[Ym],this->HaloGatherDir(source,YpCompress,Ym,face_idx));
vet_same_node(this->same_node[Zm],this->HaloGatherDir(source,ZpCompress,Zm,face_idx));
vet_same_node(this->same_node[Tm],this->HaloGatherDir(source,TpCompress,Tm,face_idx));
}
this->face_table_computed=1;
assert(this->u_comm_offset==this->_unified_buffer_size);
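For readers skimming the hunk above: vet_same_node keeps each HaloGatherDir call, which still has to run for its side effects (the halo gather itself and the face_idx bookkeeping), but parks the returned same-node flag in an unused temporary instead of asserting it against the precomputed same_node value. A standalone sketch of the idiom (plain C++; HaloGatherDirStub and the call counter are hypothetical stand-ins, not Grid code):

#include <iostream>

#define vet_same_node(a,b) { auto tmp = b; }

static int gather_calls = 0;
int HaloGatherDirStub(int dir) { ++gather_calls; return dir & 1; }  // side effect we must keep

int main() {
  int same_node[2] = {1, 0};
  vet_same_node(same_node[0], HaloGatherDirStub(0));  // call still executes, result ignored
  vet_same_node(same_node[1], HaloGatherDirStub(1));  // no assert even if the flags disagree
  std::cout << "gather calls made: " << gather_calls << std::endl;  // prints 2
  return 0;
}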

View File

@@ -52,13 +52,6 @@ public:
typedef AcceleratorVector<int,STENCIL_MAX> StencilVector;
public:
#ifdef GRID_SYCL
#define SYCL_HACK
#endif
#ifdef SYCL_HACK
static void HandDhopSiteSycl(StencilVector st_perm,StencilEntry *st_p, SiteDoubledGaugeField *U,SiteHalfSpinor *buf,
int ss,int sU,const SiteSpinor *in, SiteSpinor *out);
#endif
static void DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField &U, SiteHalfSpinor * buf,
int Ls, int Nsite, const FermionField &in, FermionField &out,

View File

@@ -63,6 +63,10 @@ WilsonFermion5D<Impl>::WilsonFermion5D(GaugeField &_Umu,
_tmp(&FiveDimRedBlackGrid),
Dirichlet(0)
{
Stencil.lo = &Lebesgue;
StencilEven.lo = &LebesgueEvenOdd;
StencilOdd.lo = &LebesgueEvenOdd;
// some assertions
assert(FiveDimGrid._ndimension==5);
assert(FourDimGrid._ndimension==4);

View File

@@ -60,6 +60,9 @@ WilsonFermion<Impl>::WilsonFermion(GaugeField &_Umu, GridCartesian &Fgrid,
_tmp(&Hgrid),
anisotropyCoeff(anis)
{
Stencil.lo = &Lebesgue;
StencilEven.lo = &LebesgueEvenOdd;
StencilOdd.lo = &LebesgueEvenOdd;
// Allocate the required comms buffer
ImportGauge(_Umu);
if (anisotropyCoeff.isAnisotropic){

View File

@@ -433,11 +433,23 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S
});
#define ASM_CALL(A) \
thread_for( ss, Nsite, { \
thread_for( sss, Nsite, { \
int ss = st.lo->Reorder(sss); \
int sU = ss; \
int sF = ss*Ls; \
WilsonKernels<Impl>::A(st_v,U_v,buf,sF,sU,Ls,1,in_v,out_v); \
});
#define ASM_CALL_SLICE(A) \
auto grid = in.Grid() ; \
int nt = grid->LocalDimensions()[4]; \
int nxyz = Nsite/nt ; \
for(int t=0;t<nt;t++){ \
thread_for( sss, nxyz, { \
int ss = t*nxyz+sss; \
int sU = ss; \
int sF = ss*Ls; \
WilsonKernels<Impl>::A(st_v,U_v,buf,sF,sU,Ls,1,in_v,out_v); \
});}
template <class Impl>
void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField &U, SiteHalfSpinor * buf,
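The ASM_CALL change above routes the worker index through the stencil's stored LebesgueOrder (the lo member added to the Stencil class later in this commit), so sites are visited in the precomputed cache-friendly order rather than lexicographically. A standalone sketch of the index-reordering pattern (plain C++; OrderStub and the toy permutation are hypothetical, not Grid's LebesgueOrder):

#include <iostream>
#include <vector>

struct OrderStub {                           // stands in for LebesgueOrder
  std::vector<int> order;
  int Reorder(int s) const { return order[s]; }
};

int main() {
  const int Nsite = 8;
  OrderStub lo;
  for (int s = 0; s < Nsite; s++) lo.order.push_back((s * 3) % Nsite);  // toy permutation of 0..7

  for (int sss = 0; sss < Nsite; sss++) {
    int ss = lo.Reorder(sss);                // visit sites in the permuted order
    int sU = ss;                             // per-site indices, as in the macro
    int sF = ss * 2;                         // e.g. Ls = 2
    std::cout << sss << " -> ss=" << ss << " sU=" << sU << " sF=" << sF << "\n";
  }
  return 0;
}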

View File

@@ -127,6 +127,8 @@ NAMESPACE_BEGIN(Grid);
ApproxNegPowerAction.tolerances[i] = action_tolerance[i];
ApproxHalfPowerAction.tolerances[i] = action_tolerance[i];
ApproxNegHalfPowerAction.tolerances[i]= action_tolerance[i];
}
for(int i=0;i<ApproxPowerMD.tolerances.size();i++){
ApproxPowerMD.tolerances[i] = md_tolerance[i];
ApproxNegPowerMD.tolerances[i] = md_tolerance[i];
ApproxHalfPowerMD.tolerances[i] = md_tolerance[i];

View File

@@ -29,6 +29,8 @@
#ifndef QCD_PSEUDOFERMION_GENERAL_EVEN_ODD_RATIONAL_RATIO_MIXED_PREC_H
#define QCD_PSEUDOFERMION_GENERAL_EVEN_ODD_RATIONAL_RATIO_MIXED_PREC_H
#include <Grid/algorithms/iterative/ConjugateGradientMultiShiftCleanup.h>
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -58,7 +60,7 @@ NAMESPACE_BEGIN(Grid);
//Allow derived classes to override the multishift CG
virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionFieldD &in, FermionFieldD &out){
#if 0
SchurDifferentiableOperator<ImplD> schurOp(numerator ? NumOp : DenOp);
SchurDifferentiableOperator<ImplD> schurOp(numerator ? NumOpD : DenOpD);
ConjugateGradientMultiShift<FermionFieldD> msCG(MaxIter, approx);
msCG(schurOp,in, out);
#else
@@ -66,7 +68,8 @@ NAMESPACE_BEGIN(Grid);
SchurDifferentiableOperator<ImplF> schurOpF(numerator ? NumOpF : DenOpF);
FermionFieldD2 inD2(NumOpD2.FermionRedBlackGrid());
FermionFieldD2 outD2(NumOpD2.FermionRedBlackGrid());
// Action better with higher precision?
ConjugateGradientMultiShiftMixedPrec<FermionFieldD2, FermionFieldF> msCG(MaxIter, approx, NumOpF.FermionRedBlackGrid(), schurOpF, ReliableUpdateFreq);
precisionChange(inD2,in);
std::cout << "msCG single solve "<<norm2(inD2)<<" " <<norm2(in)<<std::endl;
@@ -76,12 +79,12 @@ NAMESPACE_BEGIN(Grid);
}
virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionFieldD &in, std::vector<FermionFieldD> &out_elems, FermionFieldD &out){
SchurDifferentiableOperator<ImplD2> schurOpD2(numerator ? NumOpD2 : DenOpD2);
SchurDifferentiableOperator<ImplF> schurOpF(numerator ? NumOpF : DenOpF);
SchurDifferentiableOperator<ImplF> schurOpF (numerator ? NumOpF : DenOpF);
FermionFieldD2 inD2(NumOpD2.FermionRedBlackGrid());
FermionFieldD2 outD2(NumOpD2.FermionRedBlackGrid());
std::vector<FermionFieldD2> out_elemsD2(out_elems.size(),NumOpD2.FermionRedBlackGrid());
ConjugateGradientMultiShiftMixedPrec<FermionFieldD2, FermionFieldF> msCG(MaxIter, approx, NumOpF.FermionRedBlackGrid(), schurOpF, ReliableUpdateFreq);
ConjugateGradientMultiShiftMixedPrecCleanup<FermionFieldD2, FermionFieldF> msCG(MaxIter, approx, NumOpF.FermionRedBlackGrid(), schurOpF, ReliableUpdateFreq);
precisionChange(inD2,in);
std::cout << "msCG in "<<norm2(inD2)<<" " <<norm2(in)<<std::endl;
msCG(schurOpD2, inD2, out_elemsD2, outD2);

View File

@@ -300,9 +300,9 @@ public:
protected:
GridBase * _grid;
public:
GridBase *Grid(void) const { return _grid; }
LebesgueOrder *lo;
////////////////////////////////////////////////////////////////////////
// Needed to conveniently communicate gparity parameters into GPU memory
@@ -348,6 +348,7 @@ public:
////////////////////////////////////////
// Stencil query
////////////////////////////////////////
#ifdef SHM_FAST_PATH
inline int SameNode(int point) {
int dimension = this->_directions[point];
@@ -367,7 +368,40 @@ public:
if ( displacement == 0 ) return 1;
return 0;
}
#else
// fancy calculation for shm code
inline int SameNode(int point) {
int dimension = this->_directions[point];
int displacement = this->_distances[point];
int pd = _grid->_processors[dimension];
int fd = _grid->_fdimensions[dimension];
int ld = _grid->_ldimensions[dimension];
int rd = _grid->_rdimensions[dimension];
int simd_layout = _grid->_simd_layout[dimension];
int comm_dim = _grid->_processors[dimension] >1 ;
int recv_from_rank;
int xmit_to_rank;
if ( ! comm_dim ) return 1;
int nbr_proc;
if (displacement>0) nbr_proc = 1;
else nbr_proc = pd-1;
// FIXME this logic needs to be sorted for three link term
// assert( (displacement==1) || (displacement==-1));
// Present hack only works for >= 4^4 subvol per node
_grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);
void *shm = (void *) _grid->ShmBufferTranslate(recv_from_rank,this->u_recv_buf_p);
if ( shm==NULL ) return 0;
return 1;
}
#endif
//////////////////////////////////////////
// Comms packet queue for asynch thread
// Use OpenMP Tasks for cleaner ???
@@ -1075,7 +1109,7 @@ public:
int comms_recv = this->_comms_recv[point];
int comms_partial_send = this->_comms_partial_send[point] ;
int comms_partial_recv = this->_comms_partial_recv[point] ;
assert(rhs.Grid()==_grid);
// conformable(_grid,rhs.Grid());
@@ -1146,11 +1180,32 @@ public:
recv_buf=this->u_recv_buf_p;
}
// potential SHM fast path for intranode
int shm_send=0;
int shm_recv=0;
#ifdef SHM_FAST_PATH
// Put directly in place if we can
send_buf = (cobj *)_grid->ShmBufferTranslate(xmit_to_rank,recv_buf);
if ( (send_buf==NULL) ) {
shm_send=0;
send_buf = this->u_send_buf_p;
} else {
shm_send=1;
}
void *test_ptr = _grid->ShmBufferTranslate(recv_from_rank,recv_buf);
if ( test_ptr != NULL ) shm_recv = 1;
// static int printed;
// if (!printed){
// std::cout << " GATHER FAST PATH SHM "<<shm_send<< " "<<shm_recv<<std::endl;
// printed = 1;
// }
#else
////////////////////////////////////////////////////////
// Gather locally
////////////////////////////////////////////////////////
send_buf = this->u_send_buf_p; // Gather locally, must send
assert(send_buf!=NULL);
#endif
// std::cout << " GatherPlaneSimple partial send "<< comms_partial_send<<std::endl;
compressor::Gather_plane_simple(face_table[face_idx],rhs,send_buf,compress,comm_off,so,comms_partial_send);
@@ -1162,10 +1217,13 @@ public:
// Build a list of things to do after we synchronise GPUs
// Start comms now???
///////////////////////////////////////////////////////////
int do_send = (comms_send|comms_partial_send) && (!shm_send );
int do_recv = (comms_send|comms_partial_send) && (!shm_recv );
AddPacket((void *)&send_buf[comm_off],
(void *)&recv_buf[comm_off],
xmit_to_rank, comms_send|comms_partial_send,
recv_from_rank, comms_recv|comms_partial_recv,
xmit_to_rank, do_send,
recv_from_rank, do_recv,
xbytes,rbytes);
}
@@ -1307,19 +1365,47 @@ public:
int recv_from_rank;
int xmit_to_rank;
int shm_send=0;
int shm_recv=0;
_grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);
#ifdef SHM_FAST_PATH
#warning STENCIL SHM FAST PATH SELECTED
// shm == receive pointer if offnode
// shm == Translate[send pointer] if on node -- my view of his send pointer
cobj *shm = (cobj *) _grid->ShmBufferTranslate(recv_from_rank,sp);
if (shm==NULL) {
shm = rp;
// we found a packet that comes from MPI and contributes to this shift.
// is_same_node is only used in the WilsonStencil, and gets set for this point in the stencil.
// Kernel will add the exterior_terms except if is_same_node.
// leg of stencil
shm_recv=0;
} else {
shm_recv=1;
}
rpointers[i] = shm;
// Test send side
void *test_ptr = (void *) _grid->ShmBufferTranslate(xmit_to_rank,sp);
if ( test_ptr != NULL ) shm_send = 1;
// static int printed;
// if (!printed){
// std::cout << " GATHERSIMD FAST PATH SHM "<<shm_send<< " "<<shm_recv<<std::endl;
// printed = 1;
// }
#else
rpointers[i] = rp;
#endif
int duplicate = CheckForDuplicate(dimension,sx,nbr_proc,(void *)rp,i,xbytes,rbytes,cbmask);
if ( !duplicate ) {
if ( (bytes != rbytes) && (rbytes!=0) ){
acceleratorMemSet(rp,0,bytes); // Zero prefill comms buffer to zero
}
int do_send = (comms_send|comms_partial_send) && (!shm_send );
int do_recv = (comms_send|comms_partial_send) && (!shm_recv );
AddPacket((void *)sp,(void *)rp,
xmit_to_rank,comms_send|comms_partial_send,
recv_from_rank,comms_recv|comms_partial_recv,
xmit_to_rank,do_send,
recv_from_rank,do_send,
xbytes,rbytes);
}
@@ -1329,7 +1415,7 @@ public:
}
}
// rpointer may be doing a remote read in the gather over SHM
if ( comms_recv|comms_partial_recv ) {
AddMerge(&this->u_recv_buf_p[comm_off],rpointers,reduced_buffer_size,permute_type,Mergers);
}

View File

@@ -248,17 +248,23 @@ inline int acceleratorIsCommunicable(void *ptr)
//////////////////////////////////////////////
// SyCL acceleration
//////////////////////////////////////////////
#ifdef GRID_SYCL
NAMESPACE_END(Grid);
#include <CL/sycl.hpp>
#include <CL/sycl/usm.hpp>
#ifdef GRID_SYCL
#define GRID_SYCL_LEVEL_ZERO_IPC
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
NAMESPACE_END(Grid);
#if 0
#include <CL/sycl.hpp>
#include <CL/sycl/usm.hpp>
#include <level_zero/ze_api.h>
#include <CL/sycl/backend/level_zero.hpp>
#else
#include <sycl/CL/sycl.hpp>
#include <sycl/usm.hpp>
#include <level_zero/ze_api.h>
#include <sycl/ext/oneapi/backend/level_zero.hpp>
#endif
NAMESPACE_BEGIN(Grid);
extern cl::sycl::queue *theGridAccelerator;