mirror of https://github.com/paboyle/Grid.git synced 2025-06-23 18:22:02 +01:00

Compare commits


26 Commits

SHA1 Message Date
5d7e0d18b9 virtual destructor for LinearOperator 2023-04-07 14:30:38 +01:00
4072408b6f Update README.md 2023-04-07 11:45:28 +01:00
bd76b47fbf Update CI badge in README 2023-04-07 11:44:48 +01:00
18ce23aa75 Fix NEON SIMD 2023-04-06 11:30:48 +01:00
ffa7fe0cc2 Merge branch 'feature/dirichlet' into develop 2023-04-04 23:13:52 -04:00
6b979f0a69 Dirichlet improvements that I failed to commit 2023-04-04 23:13:17 -04:00
fc4db5e963 Merge branch 'feature/dirichlet' of https://github.com/paboyle/Grid into feature/dirichlet 2023-04-03 18:26:11 -04:00
6252ffaf76 No unified 2023-04-03 18:25:22 -04:00
af64c1c6b6 Had managed to drop the accelerator_barrier() in the Wilson Compressor gather 2023-03-30 17:34:44 -04:00
866f48391a Temporary fix for develop incorrect results 2023-03-30 17:10:13 -04:00
a4df527d74 Merge pull request #428 from mmphys/bugfix/comm_none (Fixes for --enable-comms=none) 2023-03-30 08:38:14 -04:00
5764d21161 Fixes for --enable-comms=none 2023-03-30 10:15:28 +01:00
496d04cd85 Weaken the Fence 2023-03-29 18:58:51 -04:00
10e6d7c6ce Merge branch 'feature/dirichlet' into develop 2023-03-29 16:26:47 -04:00
c42e25e5b8 Dirichlet remove 2023-03-29 16:25:52 -04:00
a00ae981e0 Fence propagation from SYCL 2023-03-29 15:00:40 -04:00
58e020b62a Merge branch 'feature/dirichlet' of https://github.com/paboyle/Grid into feature/dirichlet 2023-03-29 14:37:40 -04:00
a7e1aceeca Compile fix on Nvidia 2023-03-29 14:36:50 -04:00
7212432f43 More careful fencing 2023-03-28 20:10:22 -07:00
4a261fab30 Changes premerge to develop 2023-03-28 20:04:21 -07:00
6af97069b9 Preparing for close of feature/dirichlet (Initial code change review complete) 2023-03-28 13:39:44 -07:00
5068413cdb Merge branch 'feature/dirichlet' of https://github.com/paboyle/Grid into feature/dirichlet 2023-03-28 08:35:38 -07:00
71c6960eea Commet 2023-03-28 08:34:24 -07:00
ddf6d5c9e3 Merge branch 'feature/dirichlet' of https://github.com/paboyle/Grid into feature/dirichlet 2023-03-28 11:33:05 -04:00
5c85774ee3 Merge branch 'feature/dirichlet' of https://github.com/paboyle/Grid into feature/dirichlet 2023-03-24 15:40:57 -04:00
d8a9a745d8 stream synchronise 2023-03-24 15:40:30 -04:00
42 changed files with 117 additions and 433 deletions

View File

@@ -542,6 +542,7 @@ public:
(*this)(in[i], out[i]);
}
}
virtual ~LinearFunction(){};
};
template<class Field> class IdentityLinearFunction : public LinearFunction<Field> {
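This one-line addition matters because Grid code routinely deletes concrete functors through LinearFunction<Field> pointers. A minimal sketch of the failure mode it prevents, using simplified stand-in types (LinearFunctionBase and ToySmoother are illustrative, not Grid classes):

#include <memory>

// Simplified stand-ins; only the destructor pattern mirrors the change above.
template<class Field> struct LinearFunctionBase {
  virtual void operator()(const Field &in, Field &out) = 0;
  virtual ~LinearFunctionBase(){};   // without this, deleting through a base
};                                   // pointer is undefined behaviour

template<class Field> struct ToySmoother : public LinearFunctionBase<Field> {
  std::unique_ptr<Field> workspace = std::make_unique<Field>(); // would leak
  void operator()(const Field &in, Field &out) override { out = in; }
};

int main() {
  std::unique_ptr<LinearFunctionBase<double>> f =
      std::make_unique<ToySmoother<double>>();
  double x = 1.0, y = 0.0;
  (*f)(x, y);
}  // ToySmoother's destructor (and its workspace release) now runs correctly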

View File

@@ -191,7 +191,7 @@ public:
std::cout << GridLogMessage << "\tAxpyNorm " << AxpyNormTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tLinearComb " << LinearCombTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMobius flop rate " << DwfFlops/ usecs<< " Gflops " <<std::endl;
std::cout << GridLogDebug << "\tMobius flop rate " << DwfFlops/ usecs<< " Gflops " <<std::endl;
if (ErrorOnNoConverge) assert(true_residual / Tolerance < 10000.0);

View File

@@ -128,7 +128,7 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
int recv_from_rank,int dor,
int xbytes,int rbytes, int dir)
{
return 2.0*bytes;
return xbytes+rbytes;
}
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int dir)
{

View File

@@ -91,6 +91,59 @@ void *SharedMemory::ShmBufferSelf(void)
//std::cerr << "ShmBufferSelf "<<ShmRank<<" "<<std::hex<< ShmCommBufs[ShmRank] <<std::dec<<std::endl;
return ShmCommBufs[ShmRank];
}
static inline int divides(int a,int b)
{
return ( b == ( (b/a)*a ) );
}
void GlobalSharedMemory::GetShmDims(const Coordinate &WorldDims,Coordinate &ShmDims)
{
////////////////////////////////////////////////////////////////
// Allow user to configure through environment variable
////////////////////////////////////////////////////////////////
char* str = getenv(("GRID_SHM_DIMS_" + std::to_string(ShmDims.size())).c_str());
if ( str ) {
std::vector<int> IntShmDims;
GridCmdOptionIntVector(std::string(str),IntShmDims);
assert(IntShmDims.size() == WorldDims.size());
long ShmSize = 1;
for (int dim=0;dim<WorldDims.size();dim++) {
ShmSize *= (ShmDims[dim] = IntShmDims[dim]);
assert(divides(ShmDims[dim],WorldDims[dim]));
}
assert(ShmSize == WorldShmSize);
return;
}
////////////////////////////////////////////////////////////////
// Powers of 2,3,5 only in prime decomposition for now
////////////////////////////////////////////////////////////////
int ndimension = WorldDims.size();
ShmDims=Coordinate(ndimension,1);
std::vector<int> primes({2,3,5});
int dim = 0;
int last_dim = ndimension - 1;
int AutoShmSize = 1;
while(AutoShmSize != WorldShmSize) {
int p;
for(p=0;p<primes.size();p++) {
int prime=primes[p];
if ( divides(prime,WorldDims[dim]/ShmDims[dim])
&& divides(prime,WorldShmSize/AutoShmSize) ) {
AutoShmSize*=prime;
ShmDims[dim]*=prime;
last_dim = dim;
break;
}
}
if (p == primes.size() && last_dim == dim) {
std::cerr << "GlobalSharedMemory::GetShmDims failed" << std::endl;
exit(EXIT_FAILURE);
}
dim=(dim+1) %ndimension;
}
}
NAMESPACE_END(Grid);
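The automatic decomposition above reads as a small standalone algorithm: peel prime factors 2, 3, 5 off the intra-node rank count, assigning each factor to a lattice dimension it still divides, round-robin over the dimensions. A sketch under simplified assumptions (shmDims is a hypothetical free function; the real GetShmDims additionally honours the GRID_SHM_DIMS_<nd> environment override parsed by GridCmdOptionIntVector, as in the hunk):

#include <cassert>
#include <iostream>
#include <vector>

static bool divides(int a, int b) { return b % a == 0; }

std::vector<int> shmDims(const std::vector<int> &worldDims, int shmSize) {
  const int nd = (int)worldDims.size();
  std::vector<int> dims(nd, 1);
  const int primes[3] = {2, 3, 5};
  int dim = 0, last_dim = nd - 1, auto_size = 1;
  while (auto_size != shmSize) {
    int p = 0;
    for (; p < 3; p++) {
      if (divides(primes[p], worldDims[dim] / dims[dim]) &&
          divides(primes[p], shmSize / auto_size)) {
        auto_size *= primes[p];
        dims[dim] *= primes[p];
        last_dim = dim;
        break;
      }
    }
    // A full sweep without placing a factor means no 2-3-5 decomposition fits.
    assert(!(p == 3 && last_dim == dim) && "GetShmDims-style decomposition failed");
    dim = (dim + 1) % nd;
  }
  return dims;
}

int main() {
  for (int d : shmDims({4, 4, 4, 8}, 8)) std::cout << d << " "; // prints 2 2 2 1
  std::cout << std::endl;
}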

View File

@@ -38,9 +38,8 @@ Author: Christoph Lehner <christoph@lhnr.de>
#include <hip/hip_runtime_api.h>
#endif
#ifdef GRID_SYCL
#endif
#define GRID_SYCL_LEVEL_ZERO_IPC
#endif
NAMESPACE_BEGIN(Grid);
@@ -175,55 +174,6 @@ static inline int divides(int a,int b)
{
return ( b == ( (b/a)*a ) );
}
void GlobalSharedMemory::GetShmDims(const Coordinate &WorldDims,Coordinate &ShmDims)
{
////////////////////////////////////////////////////////////////
// Allow user to configure through environment variable
////////////////////////////////////////////////////////////////
char* str = getenv(("GRID_SHM_DIMS_" + std::to_string(ShmDims.size())).c_str());
if ( str ) {
std::vector<int> IntShmDims;
GridCmdOptionIntVector(std::string(str),IntShmDims);
assert(IntShmDims.size() == WorldDims.size());
long ShmSize = 1;
for (int dim=0;dim<WorldDims.size();dim++) {
ShmSize *= (ShmDims[dim] = IntShmDims[dim]);
assert(divides(ShmDims[dim],WorldDims[dim]));
}
assert(ShmSize == WorldShmSize);
return;
}
////////////////////////////////////////////////////////////////
// Powers of 2,3,5 only in prime decomposition for now
////////////////////////////////////////////////////////////////
int ndimension = WorldDims.size();
ShmDims=Coordinate(ndimension,1);
std::vector<int> primes({2,3,5});
int dim = 0;
int last_dim = ndimension - 1;
int AutoShmSize = 1;
while(AutoShmSize != WorldShmSize) {
int p;
for(p=0;p<primes.size();p++) {
int prime=primes[p];
if ( divides(prime,WorldDims[dim]/ShmDims[dim])
&& divides(prime,WorldShmSize/AutoShmSize) ) {
AutoShmSize*=prime;
ShmDims[dim]*=prime;
last_dim = dim;
break;
}
}
if (p == primes.size() && last_dim == dim) {
std::cerr << "GlobalSharedMemory::GetShmDims failed" << std::endl;
exit(EXIT_FAILURE);
}
dim=(dim+1) %ndimension;
}
}
void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
{
////////////////////////////////////////////////////////////////

View File

@@ -211,25 +211,22 @@ inline typename vobj::scalar_objectD sumD_gpu_small(const vobj *lat, Integer osi
assert(ok);
Integer smemSize = numThreads * sizeof(sobj);
// UVM seems to be buggy under later CUDA drivers
// This fails on A100 and driver 5.30.02 / CUDA 12.1
// Fails with multiple NVCC versions back to 11.4,
// which worked with earlier drivers.
// Not sure which driver had first fail and this bears checking
// Is awkward as must install multiple driver versions
// Move out of UVM
// Turns out I had messed up the synchronise after move to compute stream
// as running this on the default stream fools the synchronise
#undef UVM_BLOCK_BUFFER
#ifndef UVM_BLOCK_BUFFER
commVector<sobj> buffer(numBlocks);
sobj *buffer_v = &buffer[0];
sobj result;
reduceKernel<<< numBlocks, numThreads, smemSize >>>(lat, buffer_v, size);
reduceKernel<<< numBlocks, numThreads, smemSize, computeStream >>>(lat, buffer_v, size);
accelerator_barrier();
acceleratorCopyFromDevice(buffer_v,&result,sizeof(result));
#else
Vector<sobj> buffer(numBlocks);
sobj *buffer_v = &buffer[0];
sobj result;
reduceKernel<<< numBlocks, numThreads, smemSize >>>(lat, buffer_v, size);
reduceKernel<<< numBlocks, numThreads, smemSize, computeStream >>>(lat, buffer_v, size);
accelerator_barrier();
result = *buffer_v;
#endif
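The substance of this hunk is launching reduceKernel on computeStream rather than the default stream, so that the subsequent accelerator_barrier() genuinely fences it, while keeping the block buffer out of UVM. A self-contained CUDA sketch of that non-UVM path (the kernel here is a simplified stand-in for Grid's reduceKernel, and sumDevice is a hypothetical name):

#include <cuda_runtime.h>
#include <vector>

// Stand-in for Grid's reduceKernel: one shared-memory partial sum per block.
__global__ void reduceKernel(const double *in, double *out, size_t n) {
  extern __shared__ double smem[];
  size_t tid = threadIdx.x, i = blockIdx.x * (size_t)blockDim.x + tid;
  smem[tid] = (i < n) ? in[i] : 0.0;
  __syncthreads();
  for (unsigned s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s) smem[tid] += smem[tid + s];
    __syncthreads();
  }
  if (tid == 0) out[blockIdx.x] = smem[0];
}

double sumDevice(const double *lat_d, size_t n, cudaStream_t computeStream) {
  const int threads = 256;
  const int blocks = (int)((n + threads - 1) / threads);
  double *partial_d;
  cudaMalloc(&partial_d, blocks * sizeof(double));
  // Launch on the compute stream: a later fence on that stream now covers it.
  reduceKernel<<<blocks, threads, threads * sizeof(double), computeStream>>>(
      lat_d, partial_d, n);
  cudaStreamSynchronize(computeStream);   // the accelerator_barrier() step
  std::vector<double> partial(blocks);    // explicit copy-back, no UVM touched
  cudaMemcpy(partial.data(), partial_d, blocks * sizeof(double),
             cudaMemcpyDeviceToHost);
  cudaFree(partial_d);
  double r = 0.0;
  for (double x : partial) r += x;
  return r;
}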

View File

@@ -440,17 +440,8 @@ public:
_grid->GlobalCoorToGlobalIndex(gcoor,gidx);
_grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);
#if 1
assert(rank == _grid->ThisRank() );
#else
//
if (rank != _grid->ThisRank() ){
std::cout <<"rank "<<rank<<" _grid->ThisRank() "<<_grid->ThisRank()<< std::endl;
// exit(-42);
// assert(0);
}
#endif
assert(rank == _grid->ThisRank() );
int l_idx=generator_idx(o_idx,i_idx);
_generators[l_idx] = master_engine;

View File

@@ -463,11 +463,7 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
if( interior && exterior ) {
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSite); return;}
#ifdef SYCL_HACK
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteSycl); return; }
#else
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSite); return;}
#endif
#ifndef GRID_CUDA
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSite); return;}
#endif
@@ -478,6 +474,7 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSiteInt); return;}
#endif
} else if( exterior ) {
acceleratorFenceComputeStream();
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSiteExt); return;}
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteExt); return;}
#ifndef GRID_CUDA
@@ -502,10 +499,9 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
#ifndef GRID_CUDA
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSiteDag); return;}
#endif
acceleratorFenceComputeStream();
} else if( interior ) {
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSiteDagInt); return;}
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDagInt); return;}
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALLNB(GenericDhopSiteDagInt); return;}
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALLNB(HandDhopSiteDagInt); return;}
#ifndef GRID_CUDA
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSiteDagInt); return;}
#endif
@@ -516,7 +512,6 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
#ifndef GRID_CUDA
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSiteDagExt); return;}
#endif
acceleratorFenceComputeStream();
}
assert(0 && " Kernel optimisation case not covered ");
}

View File

@@ -1 +0,0 @@
../CayleyFermion5DInstantiation.cc.master

View File

@@ -1 +0,0 @@
../ContinuedFractionFermion5DInstantiation.cc.master

View File

@@ -1 +0,0 @@
../DomainWallEOFAFermionInstantiation.cc.master

View File

@@ -1 +0,0 @@
../MobiusEOFAFermionInstantiation.cc.master

View File

@@ -1 +0,0 @@
../PartialFractionFermion5DInstantiation.cc.master

View File

@@ -1 +0,0 @@
../WilsonCloverFermionInstantiation.cc.master

View File

@@ -1 +0,0 @@
../WilsonFermion5DInstantiation.cc.master

View File

@@ -1 +0,0 @@
../WilsonFermionInstantiation.cc.master

View File

@@ -1 +0,0 @@
../WilsonKernelsInstantiation.cc.master

View File

@@ -1 +0,0 @@
../WilsonTMFermionInstantiation.cc.master

View File

@@ -1 +0,0 @@
#define IMPLEMENTATION WilsonImplD2

View File

@@ -1 +0,0 @@
../CayleyFermion5DInstantiation.cc.master

View File

@@ -1 +0,0 @@
../ContinuedFractionFermion5DInstantiation.cc.master

View File

@@ -1 +0,0 @@
../DomainWallEOFAFermionInstantiation.cc.master

View File

@@ -1 +0,0 @@
../MobiusEOFAFermionInstantiation.cc.master

View File

@@ -1 +0,0 @@
../PartialFractionFermion5DInstantiation.cc.master

View File

@@ -1 +0,0 @@
../WilsonFermion5DInstantiation.cc.master

View File

@@ -1 +0,0 @@
../WilsonKernelsInstantiation.cc.master

View File

@@ -1 +0,0 @@
#define IMPLEMENTATION ZWilsonImplD2

View File

@@ -38,19 +38,15 @@ NAMESPACE_BEGIN(Grid);
// cf. GeneralEvenOddRational.h for details
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<class ImplD, class ImplF, class ImplD2>
template<class ImplD, class ImplF>
class GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction : public GeneralEvenOddRatioRationalPseudoFermionAction<ImplD> {
private:
typedef typename ImplD2::FermionField FermionFieldD2;
typedef typename ImplD::FermionField FermionFieldD;
typedef typename ImplF::FermionField FermionFieldF;
FermionOperator<ImplD> & NumOpD;
FermionOperator<ImplD> & DenOpD;
FermionOperator<ImplD2> & NumOpD2;
FermionOperator<ImplD2> & DenOpD2;
FermionOperator<ImplF> & NumOpF;
FermionOperator<ImplF> & DenOpF;
@@ -64,40 +60,31 @@ NAMESPACE_BEGIN(Grid);
ConjugateGradientMultiShift<FermionFieldD> msCG(MaxIter, approx);
msCG(schurOp,in, out);
#else
SchurDifferentiableOperator<ImplD2> schurOpD2(numerator ? NumOpD2 : DenOpD2);
SchurDifferentiableOperator<ImplD> schurOpD(numerator ? NumOpD : DenOpD);
SchurDifferentiableOperator<ImplF> schurOpF(numerator ? NumOpF : DenOpF);
FermionFieldD2 inD2(NumOpD2.FermionRedBlackGrid());
FermionFieldD2 outD2(NumOpD2.FermionRedBlackGrid());
FermionFieldD inD(NumOpD.FermionRedBlackGrid());
FermionFieldD outD(NumOpD.FermionRedBlackGrid());
// Action better with higher precision?
ConjugateGradientMultiShiftMixedPrec<FermionFieldD2, FermionFieldF> msCG(MaxIter, approx, NumOpF.FermionRedBlackGrid(), schurOpF, ReliableUpdateFreq);
precisionChange(inD2,in);
std::cout << "msCG single solve "<<norm2(inD2)<<" " <<norm2(in)<<std::endl;
msCG(schurOpD2, inD2, outD2);
precisionChange(out,outD2);
ConjugateGradientMultiShiftMixedPrec<FermionFieldD, FermionFieldF> msCG(MaxIter, approx, NumOpF.FermionRedBlackGrid(), schurOpF, ReliableUpdateFreq);
msCG(schurOpD, in, out);
#endif
}
virtual void multiShiftInverse(bool numerator, const MultiShiftFunction &approx, const Integer MaxIter, const FermionFieldD &in, std::vector<FermionFieldD> &out_elems, FermionFieldD &out){
SchurDifferentiableOperator<ImplD2> schurOpD2(numerator ? NumOpD2 : DenOpD2);
SchurDifferentiableOperator<ImplD> schurOpD(numerator ? NumOpD : DenOpD);
SchurDifferentiableOperator<ImplF> schurOpF (numerator ? NumOpF : DenOpF);
FermionFieldD2 inD2(NumOpD2.FermionRedBlackGrid());
FermionFieldD2 outD2(NumOpD2.FermionRedBlackGrid());
std::vector<FermionFieldD2> out_elemsD2(out_elems.size(),NumOpD2.FermionRedBlackGrid());
ConjugateGradientMultiShiftMixedPrecCleanup<FermionFieldD2, FermionFieldF> msCG(MaxIter, approx, NumOpF.FermionRedBlackGrid(), schurOpF, ReliableUpdateFreq);
precisionChange(inD2,in);
std::cout << "msCG in "<<norm2(inD2)<<" " <<norm2(in)<<std::endl;
msCG(schurOpD2, inD2, out_elemsD2, outD2);
precisionChange(out,outD2);
for(int i=0;i<out_elems.size();i++){
precisionChange(out_elems[i],out_elemsD2[i]);
}
FermionFieldD inD(NumOpD.FermionRedBlackGrid());
FermionFieldD outD(NumOpD.FermionRedBlackGrid());
std::vector<FermionFieldD> out_elemsD(out_elems.size(),NumOpD.FermionRedBlackGrid());
ConjugateGradientMultiShiftMixedPrecCleanup<FermionFieldD, FermionFieldF> msCG(MaxIter, approx, NumOpF.FermionRedBlackGrid(), schurOpF, ReliableUpdateFreq);
msCG(schurOpD, in, out_elems, out);
}
//Allow derived classes to override the gauge import
virtual void ImportGauge(const typename ImplD::GaugeField &Ud){
typename ImplF::GaugeField Uf(NumOpF.GaugeGrid());
typename ImplD2::GaugeField Ud2(NumOpD2.GaugeGrid());
typename ImplD::GaugeField Ud2(NumOpD.GaugeGrid());
precisionChange(Uf, Ud);
precisionChange(Ud2, Ud);
@@ -109,20 +96,18 @@ NAMESPACE_BEGIN(Grid);
NumOpF.ImportGauge(Uf);
DenOpF.ImportGauge(Uf);
NumOpD2.ImportGauge(Ud2);
DenOpD2.ImportGauge(Ud2);
NumOpD.ImportGauge(Ud2);
DenOpD.ImportGauge(Ud2);
}
public:
GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction(FermionOperator<ImplD> &_NumOpD, FermionOperator<ImplD> &_DenOpD,
FermionOperator<ImplF> &_NumOpF, FermionOperator<ImplF> &_DenOpF,
FermionOperator<ImplD2> &_NumOpD2, FermionOperator<ImplD2> &_DenOpD2,
const RationalActionParams & p, Integer _ReliableUpdateFreq
) : GeneralEvenOddRatioRationalPseudoFermionAction<ImplD>(_NumOpD, _DenOpD, p),
ReliableUpdateFreq(_ReliableUpdateFreq),
NumOpD(_NumOpD), DenOpD(_DenOpD),
NumOpF(_NumOpF), DenOpF(_DenOpF),
NumOpD2(_NumOpD2), DenOpD2(_DenOpD2)
NumOpF(_NumOpF), DenOpF(_DenOpF)
{}
virtual std::string action_name(){return "GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction";}

View File

@@ -67,9 +67,9 @@ NAMESPACE_BEGIN(Grid);
virtual std::string action_name(){return "OneFlavourEvenOddRatioRationalPseudoFermionAction";}
};
template<class Impl,class ImplF,class ImplD2>
template<class Impl,class ImplF>
class OneFlavourEvenOddRatioRationalMixedPrecPseudoFermionAction
: public GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<Impl,ImplF,ImplD2> {
: public GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<Impl,ImplF> {
public:
typedef OneFlavourRationalParams Params;
private:
@@ -91,11 +91,9 @@ NAMESPACE_BEGIN(Grid);
FermionOperator<Impl> &_DenOp,
FermionOperator<ImplF> &_NumOpF,
FermionOperator<ImplF> &_DenOpF,
FermionOperator<ImplD2> &_NumOpD2,
FermionOperator<ImplD2> &_DenOpD2,
const Params & p, Integer ReliableUpdateFreq
) :
GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<Impl,ImplF,ImplD2>(_NumOp, _DenOp,_NumOpF, _DenOpF,_NumOpD2, _DenOpD2, transcribe(p),ReliableUpdateFreq){}
GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<Impl,ImplF>(_NumOp, _DenOp,_NumOpF, _DenOpF, transcribe(p),ReliableUpdateFreq){}
virtual std::string action_name(){return "OneFlavourEvenOddRatioRationalPseudoFermionAction";}
};

View File

@@ -112,40 +112,27 @@ NAMESPACE_BEGIN(Grid);
// NumOp == V
// DenOp == M
//
AUDIT();
FermionField etaOdd (NumOp.FermionRedBlackGrid());
FermionField etaEven(NumOp.FermionRedBlackGrid());
FermionField tmp (NumOp.FermionRedBlackGrid());
AUDIT();
pickCheckerboard(Even,etaEven,eta);
AUDIT();
pickCheckerboard(Odd,etaOdd,eta);
AUDIT();
NumOp.ImportGauge(U);
AUDIT();
DenOp.ImportGauge(U);
std::cout << " TwoFlavourRefresh: Imported gauge "<<std::endl;
AUDIT();
SchurDifferentiableOperator<Impl> Mpc(DenOp);
AUDIT();
SchurDifferentiableOperator<Impl> Vpc(NumOp);
AUDIT();
std::cout << " TwoFlavourRefresh: Diff ops "<<std::endl;
AUDIT();
// Odd det factors
Mpc.MpcDag(etaOdd,PhiOdd);
AUDIT();
std::cout << " TwoFlavourRefresh: MpcDag "<<std::endl;
tmp=Zero();
AUDIT();
std::cout << " TwoFlavourRefresh: Zero() guess "<<std::endl;
AUDIT();
HeatbathSolver(Vpc,PhiOdd,tmp);
AUDIT();
std::cout << " TwoFlavourRefresh: Heatbath solver "<<std::endl;
Vpc.Mpc(tmp,PhiOdd);
std::cout << " TwoFlavourRefresh: Mpc "<<std::endl;

View File

@@ -134,14 +134,12 @@ protected:
double start_force = usecond();
std::cout << GridLogMessage << "AuditForce["<<level<<"]["<<a<<"] before"<<std::endl;
AUDIT();
as[level].actions.at(a)->deriv_timer_start();
as[level].actions.at(a)->deriv(Us, force); // deriv should NOT include Ta
as[level].actions.at(a)->deriv_timer_stop();
std::cout << GridLogMessage << "AuditForce["<<level<<"]["<<a<<"] after"<<std::endl;
AUDIT();
std::cout << GridLogIntegrator << "Smearing (on/off): " << as[level].actions.at(a)->is_smeared << std::endl;
auto name = as[level].actions.at(a)->action_name();
@@ -382,12 +380,12 @@ public:
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
std::cout << GridLogMessage << "AuditRefresh["<<level<<"]["<<actionID<<"] before"<<std::endl;
AUDIT();
as[level].actions.at(actionID)->refresh_timer_start();
as[level].actions.at(actionID)->refresh(Us, sRNG, pRNG);
as[level].actions.at(actionID)->refresh_timer_stop();
std::cout << GridLogMessage << "AuditRefresh["<<level<<"]["<<actionID<<"] after"<<std::endl;
AUDIT();
}
// Refresh the higher representation actions
@@ -424,7 +422,7 @@ public:
// Actions
for (int level = 0; level < as.size(); ++level) {
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
AUDIT();
// get gauge field from the SmearingPolicy and
// based on the boolean is_smeared in actionID
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
@@ -434,7 +432,7 @@ public:
as[level].actions.at(actionID)->S_timer_stop();
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] H = " << Hterm << std::endl;
H += Hterm;
AUDIT();
}
as[level].apply(S_hireps, Representations, level, H);
}
@@ -447,9 +445,9 @@ public:
void operator()(std::vector<Action<FieldType>*> repr_set, Repr& Rep, int level, RealD& H) {
for (int a = 0; a < repr_set.size(); ++a) {
AUDIT();
RealD Hterm = repr_set.at(a)->Sinitial(Rep.U);
AUDIT();
std::cout << GridLogMessage << "Sinitial Level " << level << " term " << a << " H Hirep = " << Hterm << std::endl;
H += Hterm;
@@ -474,10 +472,10 @@ public:
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] action eval " << std::endl;
as[level].actions.at(actionID)->S_timer_start();
AUDIT();
Hterm = as[level].actions.at(actionID)->Sinitial(Us);
as[level].actions.at(actionID)->S_timer_stop();
AUDIT();
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] H = " << Hterm << std::endl;
H += Hterm;
}
@@ -490,7 +488,6 @@ public:
void integrate(Field& U)
{
AUDIT();
// reset the clocks
t_U = 0;
for (int level = 0; level < as.size(); ++level) {
@@ -508,10 +505,8 @@ public:
assert(fabs(t_U - t_P[level]) < 1.0e-6); // must be the same
std::cout << GridLogIntegrator << " times[" << level << "]= " << t_P[level] << " " << t_U << std::endl;
}
AUDIT();
FieldImplementation::Project(U);
AUDIT();
// and that we indeed got to the end of the trajectory
assert(fabs(t_U - Params.trajL) < 1.0e-6);

View File

@@ -320,7 +320,7 @@ struct Conj{
struct TimesMinusI{
//Complex single
inline float32x4_t operator()(float32x4_t in, float32x4_t ret){
inline float32x4_t operator()(float32x4_t in){
// ar ai br bi -> ai -ar ai -br
float32x4_t r0, r1;
r0 = vnegq_f32(in); // -ar -ai -br -bi
@@ -328,7 +328,7 @@ struct TimesMinusI{
return vtrn1q_f32(r1, r0); // ar -ai br -bi
}
//Complex double
inline float64x2_t operator()(float64x2_t in, float64x2_t ret){
inline float64x2_t operator()(float64x2_t in){
// a ib -> b -ia
float64x2_t tmp;
tmp = vnegq_f64(in);
@@ -338,7 +338,7 @@
struct TimesI{
//Complex single
inline float32x4_t operator()(float32x4_t in, float32x4_t ret){
inline float32x4_t operator()(float32x4_t in){
// ar ai br bi -> -ai ar -bi br
float32x4_t r0, r1;
r0 = vnegq_f32(in); // -ar -ai -br -bi
@@ -346,7 +346,7 @@
return vtrn1q_f32(r1, in); // -ai ar -bi br
}
//Complex double
inline float64x2_t operator()(float64x2_t in, float64x2_t ret){
inline float64x2_t operator()(float64x2_t in){
// a ib -> -b ia
float64x2_t tmp;
tmp = vnegq_f64(in);
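The fix drops the unused second argument so the functors match how Grid invokes them, which is what broke the NEON build. A standalone AArch64 check of the corrected one-argument TimesI sequence (timesI here is a local helper mirroring the intrinsic pattern above, not Grid's functor):

#include <arm_neon.h>
#include <cstdio>

static inline float32x4_t timesI(float32x4_t in) {
  float32x4_t r0 = vnegq_f32(in);      // -ar -ai -br -bi
  float32x4_t r1 = vtrn2q_f32(r0, r0); // -ai -ai -bi -bi
  return vtrn1q_f32(r1, in);           // -ai  ar -bi  br
}

int main() {
  float v[4] = {1, 2, 3, 4};           // (1+2i, 3+4i)
  float32x4_t r = timesI(vld1q_f32(v));
  float out[4]; vst1q_f32(out, r);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); // -2 1 -4 3
}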

View File

@@ -434,6 +434,7 @@ public:
////////////////////////////////////////////////////////////////////////
void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs)
{
accelerator_barrier();
for(int i=0;i<Packets.size();i++){
_grid->StencilSendToRecvFromBegin(MpiReqs,
Packets[i].send_buf,
@@ -665,9 +666,11 @@ public:
for(int i=0;i<mm.size();i++){
decompressor::MergeFace(decompress,mm[i]);
}
if ( mm.size() ) acceleratorFenceComputeStream();
for(int i=0;i<dd.size();i++){
decompressor::DecompressFace(decompress,dd[i]);
}
if ( dd.size() ) acceleratorFenceComputeStream();
}
////////////////////////////////////////
// Set up routines

View File

@@ -458,7 +458,8 @@ inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream);
// Common on all GPU targets
//////////////////////////////////////////////
#if defined(GRID_SYCL) || defined(GRID_CUDA) || defined(GRID_HIP)
#define accelerator_forNB( iter1, num1, nsimd, ... ) accelerator_for2dNB( iter1, num1, iter2, 1, nsimd, {__VA_ARGS__} );
// FIXME -- the non-blocking nature got broken March 30 2023 by PAB
#define accelerator_forNB( iter1, num1, nsimd, ... ) accelerator_for2dNB( iter1, num1, iter2, 1, nsimd, {__VA_ARGS__} );
#define accelerator_for( iter, num, nsimd, ... ) \
accelerator_forNB(iter, num, nsimd, { __VA_ARGS__ } ); \
@@ -525,7 +526,7 @@ inline void acceleratorFreeCpu (void *ptr){free(ptr);};
//////////////////////////////////////////////
#ifdef GRID_SYCL
inline void acceleratorFenceComputeStream(void){ accelerator_barrier();};
inline void acceleratorFenceComputeStream(void){ theGridAccelerator->submit_barrier();};
#else
// Ordering within a stream guaranteed on Nvidia & AMD
inline void acceleratorFenceComputeStream(void){ };
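On SYCL the fence used to be a full accelerator_barrier(), i.e. a host-side wait; the new definition enqueues a barrier on the queue instead. A sketch of the distinction on a plain queue (assuming DPC++'s ext_oneapi_submit_barrier extension, which theGridAccelerator->submit_barrier() presumably wraps):

#include <sycl/sycl.hpp>

void fences(sycl::queue &q) {
  // accelerator_barrier(): block the host until all submitted work is done.
  q.wait();
  // acceleratorFenceComputeStream() after this change: enqueue an ordering
  // point; later submissions wait on earlier ones, the host keeps running.
  q.ext_oneapi_submit_barrier();
}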

View File

@@ -451,7 +451,7 @@ int main(int argc, char **argv) {
#define MIXED_PRECISION
#ifdef MIXED_PRECISION
std::vector<GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicy,FermionImplPolicyF,FermionImplPolicy> *> Bdys;
std::vector<GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicy,FermionImplPolicyF> *> Bdys;
#else
std::vector<GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> *> Bdys;
#endif
@@ -526,15 +526,13 @@ int main(int argc, char **argv) {
Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],*MPCG[h],*ActionMPCG[h],CG));
} else {
#ifdef MIXED_PRECISION
Bdys.push_back( new GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicy,FermionImplPolicyF,FermionImplPolicy>(
Bdys.push_back( new GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicy,FermionImplPolicyF>(
*Numerators[h],*Denominators[h],
*NumeratorsF[h],*DenominatorsF[h],
*Numerators[h],*Denominators[h],
OFRp, SP_iters) );
Bdys.push_back( new GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicy,FermionImplPolicyF,FermionImplPolicy>(
Bdys.push_back( new GeneralEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicy,FermionImplPolicyF>(
*Numerators[h],*Denominators[h],
*NumeratorsF[h],*DenominatorsF[h],
*Numerators[h],*Denominators[h],
OFRp, SP_iters) );
#else
Bdys.push_back( new GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],OFRp));

View File

@@ -164,11 +164,6 @@ int main(int argc, char **argv) {
typedef MobiusEOFAFermionF FermionEOFAActionF;
typedef typename FermionActionF::FermionField FermionFieldF;
typedef WilsonImplD2 FermionImplPolicyD2;
typedef MobiusFermionD2 FermionActionD2;
typedef MobiusEOFAFermionD2 FermionEOFAActionD2;
typedef typename FermionActionD2::FermionField FermionFieldD2;
typedef Grid::XmlReader Serialiser;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
@@ -272,7 +267,6 @@ int main(int argc, char **argv) {
// temporarily need a gauge field
LatticeGaugeFieldD U(GridPtr); U=Zero();
LatticeGaugeFieldF UF(GridPtrF); UF=Zero();
LatticeGaugeFieldD2 UD2(GridPtrF); UD2=Zero();
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
TheHMC.ReadCommandLine(argc,argv); // params on CML or from param file
@@ -394,15 +388,13 @@ int main(int argc, char **argv) {
std::vector<FermionAction *> Denominators;
std::vector<FermionActionF *> NumeratorsF;
std::vector<FermionActionF *> DenominatorsF;
std::vector<FermionActionD2 *> NumeratorsD2;
std::vector<FermionActionD2 *> DenominatorsD2;
std::vector<TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy> *> Quotients;
std::vector<MxPCG *> ActionMPCG;
std::vector<MxPCG *> MPCG;
#define MIXED_PRECISION
#ifdef MIXED_PRECISION
std::vector<OneFlavourEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicy,FermionImplPolicyF,FermionImplPolicyD2> *> Bdys;
std::vector<OneFlavourEvenOddRatioRationalMixedPrecPseudoFermionAction<FermionImplPolicy,FermionImplPolicyF> *> Bdys;
#else
std::vector<OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> *> Bdys;
#endif

View File

@@ -1,7 +1,8 @@
# Grid [![Teamcity status](http://ci.cliath.ph.ed.ac.uk/app/rest/builds/aggregated/strob:(buildType:(affectedProject(id:GridBasedSoftware_Grid)),branch:name:develop)/statusIcon.svg)](http://ci.cliath.ph.ed.ac.uk/project.html?projectId=GridBasedSoftware_Grid&tab=projectOverview)
# Grid
**Data parallel C++ mathematical object library.**
[![Teamcity status](https://ci.dev.dirac.ed.ac.uk/guestAuth/app/rest/builds/aggregated/strob:(buildType:(affectedProject(id:GridBasedSoftware_Grid)),branch:default:true)/statusIcon.svg)](https://ci.dev.dirac.ed.ac.uk/project/GridBasedSoftware_Grid?mode=builds)
License: GPL v2.
Last update June 2017.

View File

@@ -1,7 +1,7 @@
CLIME=`spack find --paths c-lime@2-3-9 | grep c-lime| cut -c 15-`
../../configure --enable-comms=mpi-auto \
--with-lime=$CLIME \
--enable-unified=yes \
--enable-unified=no \
--enable-shm=nvlink \
--enable-tracing=timer \
--enable-accelerator=hip \

View File

@@ -5,8 +5,8 @@ module load emacs
#module load gperftools
module load PrgEnv-gnu
module load rocm/5.3.0
module load cray-mpich/8.1.16
#module load cray-mpich/8.1.17
#module load cray-mpich/8.1.16
module load cray-mpich/8.1.17
module load gmp
module load cray-fftw
module load craype-accel-amd-gfx90a

View File

@@ -73,12 +73,12 @@ int main (int argc, char ** argv)
RealD M5 =1.8;
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
std::cout<<GridLogMessage <<"DomainWallFermion vectorised test"<<std::endl;
std::cout<<GridLogMessage <<"DomainWallFermion test"<<std::endl;
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
std::vector<Complex> boundary = {1,1,1,-1};
DomainWallFermionD::ImplParams Params(boundary);
Coordinate Dirichlet({0,8,8,16,32});
Params.dirichlet=Dirichlet;
// Coordinate Dirichlet({0,8,8,16,32});
// Params.dirichlet=Dirichlet;
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,Params);
TestWhat<DomainWallFermionD>(Ddwf,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);

View File

@@ -53,7 +53,7 @@ static int readInt(int* argc, char*** argv, std::string&& option, int defaultVal
static float readFloat(int* argc, char*** argv, std::string&& option, float defaultValue) {
std::string arg;
float ret = defaultValue;
double ret = defaultValue;
if(checkPresent(argc, argv, option)) {
arg = getContent(argc, argv, option);
GridCmdOptionFloat(arg, ret);
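The one-line fix works because GridCmdOptionFloat writes through a double reference, to which a float local cannot bind. A minimal reconstruction of the pattern (parseValue is a hypothetical stand-in for GridCmdOptionFloat, whose double& signature is inferred from this fix):

#include <sstream>
#include <string>

// Stand-in parser: note it writes through a double reference.
static void parseValue(const std::string &arg, double &val) {
  std::istringstream(arg) >> val;
}

static float readFloat(const std::string &arg, float defaultValue) {
  double ret = defaultValue;       // must be double: parseValue takes double&
  if (!arg.empty()) parseValue(arg, ret);
  return static_cast<float>(ret);  // narrow once, at the boundary
}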

View File

@@ -1,244 +0,0 @@
/*************************************************************************************
Gamma::Algebra Gmu [] = {
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT,
Gamma::Algebra::Gamma5
};
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
int threads = GridThread::GetThreads();
std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
Coordinate latt_size = GridDefaultLatt();
Coordinate simd_layout = GridDefaultSimd(Nd,vComplexD::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();
int vol = 1;
for(int d=0;d<latt_size.size();d++){
vol = vol * latt_size[d];
}
GridCartesian GRID(latt_size,simd_layout,mpi_layout);
GridRedBlackCartesian RBGRID(&GRID);
LatticeComplexD coor(&GRID);
ComplexD ci(0.0,1.0);
std::vector<int> seeds({1,2,3,4});
GridSerialRNG sRNG; sRNG.SeedFixedIntegers(seeds); // naughty seeding
GridParallelRNG pRNG(&GRID);
pRNG.SeedFixedIntegers(seeds);
LatticeGaugeFieldD Umu(&GRID);
SU<Nc>::ColdConfiguration(pRNG,Umu); // Unit gauge
////////////////////////////////////////////////////
// Wilson test
////////////////////////////////////////////////////
{
LatticeFermionD src(&GRID); gaussian(pRNG,src);
LatticeFermionD src_p(&GRID);
LatticeFermionD tmp(&GRID);
LatticeFermionD ref(&GRID);
LatticeFermionD result(&GRID);
RealD mass=0.1;
WilsonFermionD Dw(Umu,GRID,RBGRID,mass);
Dw.M(src,ref);
std::cout << "Norm src "<<norm2(src)<<std::endl;
std::cout << "Norm Dw x src "<<norm2(ref)<<std::endl;
{
FFT theFFT(&GRID);
////////////////
// operator in Fourier space
////////////////
tmp =ref;
theFFT.FFT_all_dim(result,tmp,FFT::forward);
std::cout<<"FFT[ Dw x src ] "<< norm2(result)<<std::endl;
tmp = src;
theFFT.FFT_all_dim(src_p,tmp,FFT::forward);
std::cout<<"FFT[ src ] "<< norm2(src_p)<<std::endl;
/////////////////////////////////////////////////////////////////
// work out the predicted FT from Fourier
/////////////////////////////////////////////////////////////////
auto FGrid = &GRID;
LatticeFermionD Kinetic(FGrid); Kinetic = Zero();
LatticeComplexD kmu(FGrid);
LatticeInteger scoor(FGrid);
LatticeComplexD sk (FGrid); sk = Zero();
LatticeComplexD sk2(FGrid); sk2= Zero();
LatticeComplexD W(FGrid); W= Zero();
LatticeComplexD one(FGrid); one =ComplexD(1.0,0.0);
ComplexD ci(0.0,1.0);
for(int mu=0;mu<Nd;mu++) {
RealD TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(kmu,mu);
kmu = TwoPiL * kmu;
sk2 = sk2 + 2.0*sin(kmu*0.5)*sin(kmu*0.5);
sk = sk + sin(kmu) *sin(kmu);
// -1/2 Dw -> 1/2 gmu (eip - emip) = i sinp gmu
Kinetic = Kinetic + sin(kmu)*ci*(Gamma(Gmu[mu])*src_p);
}
W = mass + sk2;
Kinetic = Kinetic + W * src_p;
std::cout<<"Momentum space src "<< norm2(src_p)<<std::endl;
std::cout<<"Momentum space Dw x src "<< norm2(Kinetic)<<std::endl;
std::cout<<"FT[Coordinate space Dw] "<< norm2(result)<<std::endl;
result = result - Kinetic;
std::cout<<"diff "<< norm2(result)<<std::endl;
}
std::cout << " =======================================" <<std::endl;
std::cout << " Checking FourierFreePropagator x Dw = 1" <<std::endl;
std::cout << " =======================================" <<std::endl;
std::cout << "Dw src = " <<norm2(src)<<std::endl;
std::cout << "Dw tmp = " <<norm2(tmp)<<std::endl;
Dw.M(src,tmp);
Dw.FreePropagator(tmp,ref,mass);
std::cout << "Dw ref = " <<norm2(ref)<<std::endl;
ref = ref - src;
std::cout << "Dw ref-src = " <<norm2(ref)<<std::endl;
}
////////////////////////////////////////////////////
// Wilson prop
////////////////////////////////////////////////////
{
std::cout<<"****************************************"<<std::endl;
std::cout << "Wilson Mom space 4d propagator \n";
std::cout<<"****************************************"<<std::endl;
LatticeFermionD src(&GRID); gaussian(pRNG,src);
LatticeFermionD tmp(&GRID);
LatticeFermionD ref(&GRID);
LatticeFermionD diff(&GRID);
src=Zero();
Coordinate point(4,0); // 0,0,0,0
SpinColourVectorD ferm;
ferm=Zero();
ferm()(0)(0) = ComplexD(1.0);
pokeSite(ferm,src,point);
RealD mass=0.1;
WilsonFermionD Dw(Umu,GRID,RBGRID,mass);
// Momentum space prop
std::cout << " Solving by FFT and Feynman rules" <<std::endl;
Dw.FreePropagator(src,ref,mass) ;
Gamma G5(Gamma::Algebra::Gamma5);
LatticeFermionD result(&GRID);
const int sdir=0;
////////////////////////////////////////////////////////////////////////
// Conjugate gradient on normal equations system
////////////////////////////////////////////////////////////////////////
std::cout << " Solving by Conjugate Gradient (CGNE)" <<std::endl;
Dw.Mdag(src,tmp);
src=tmp;
MdagMLinearOperator<WilsonFermionD,LatticeFermionD> HermOp(Dw);
ConjugateGradient<LatticeFermionD> CG(1.0e-10,10000);
CG(HermOp,src,result);
////////////////////////////////////////////////////////////////////////
std::cout << " Taking difference" <<std::endl;
std::cout << "Dw result "<<norm2(result)<<std::endl;
std::cout << "Dw ref "<<norm2(ref)<<std::endl;
diff = ref - result;
std::cout << "result - ref "<<norm2(diff)<<std::endl;
DumpSliceNorm("Slice Norm Solution ",result,Nd-1);
}
////////////////////////////////////////////////////
//Gauge invariance test
////////////////////////////////////////////////////
{
std::cout<<"****************************************"<<std::endl;
std::cout << "Gauge invariance test \n";
std::cout<<"****************************************"<<std::endl;
LatticeGaugeField U_GT(&GRID); // Gauge transformed field
LatticeColourMatrix g(&GRID); // local Gauge xform matrix
U_GT = Umu;
// Make a random xform to the gauge field
SU<Nc>::RandomGaugeTransform(pRNG,U_GT,g); // Unit gauge
LatticeFermionD src(&GRID);
LatticeFermionD tmp(&GRID);
LatticeFermionD ref(&GRID);
LatticeFermionD diff(&GRID);
// could loop over colors
src=Zero();
Coordinate point(4,0); // 0,0,0,0
SpinColourVectorD ferm;
ferm=Zero();
ferm()(0)(0) = ComplexD(1.0);
pokeSite(ferm,src,point);
RealD mass=0.1;
WilsonFermionD Dw(U_GT,GRID,RBGRID,mass);
// Momentum space prop
std::cout << " Solving by FFT and Feynman rules" <<std::endl;
Dw.FreePropagator(src,ref,mass) ;
Gamma G5(Gamma::Algebra::Gamma5);
LatticeFermionD result(&GRID);
const int sdir=0;
////////////////////////////////////////////////////////////////////////
// Conjugate gradient on normal equations system
////////////////////////////////////////////////////////////////////////
std::cout << " Solving by Conjugate Gradient (CGNE)" <<std::endl;
Dw.Mdag(src,tmp);
src=tmp;
MdagMLinearOperator<WilsonFermionD,LatticeFermionD> HermOp(Dw);
ConjugateGradient<LatticeFermionD> CG(1.0e-10,10000);
CG(HermOp,src,result);
////////////////////////////////////////////////////////////////////////
std::cout << " Taking difference" <<std::endl;
std::cout << "Dw result "<<norm2(result)<<std::endl;
std::cout << "Dw ref "<<norm2(ref)<<std::endl;
diff = ref - result;
std::cout << "result - ref "<<norm2(diff)<<std::endl;
DumpSliceNorm("Slice Norm Solution ",result,Nd-1);
}
Grid_finalize();
}