Mirror of https://github.com/paboyle/Grid.git

Started a tidy up in the HMC sector. Now comfortable with the two-level integrators;
it took a little while to figure out what Guido had done & why -- but there is a neat
saving of force evaluations across the nesting time boundary, making use of the
linearity of leapP in dt.
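
To make the saving concrete, here is a minimal sketch (illustrative stand-ins, not
the Grid classes): because leapP is P <- P - F*dt, i.e. linear in dt, the inner
level's closing half-step and the outer level's opening half-step at the same time
boundary can be fused into one update with the summed dt, so the (unchanged) force
is evaluated once instead of twice.

#include <cassert>
#include <cmath>

struct Field { double P, F; };        // stand-ins for the momentum and the force

void leapP(Field &s, double dt) {     // P <- P - F*dt : linear in dt
  s.P -= s.F * dt;
}

int main() {
  Field a{1.0, 0.5}, b{1.0, 0.5};

  // Naive nesting: the inner level closes with its half-step, then the outer
  // level applies its own half-step at the same time point; the force (which
  // has not changed in between) would be evaluated twice.
  leapP(a, 0.05);                     // inner dt/2
  leapP(a, 0.10);                     // outer dt/2

  // Fused: a single update with the summed step; one force evaluation.
  leapP(b, 0.05 + 0.10);

  assert(std::fabs(a.P - b.P) < 1e-12);   // identical by linearity
}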

I cleaned up the printing and reduced the volume of code, in the process sharing the
printing between all integrators. Placed an assert that the total integration times
across all levels must match at the end of the trajectory.
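
A minimal sketch of that bookkeeping (hypothetical names, not the Grid code): every
P-update adds its dt to a per-level counter and every U-update adds its dt to t_U;
for any consistent nesting, all counters must reach the trajectory length together.

#include <cassert>
#include <cmath>
#include <vector>

std::vector<double> t_P(2, 0.0);   // per-level momentum-update time
double t_U = 0.0;                  // gauge-field-update time

void leapP(int level, double dt) { t_P[level] += dt; }
void leapU(double dt)            { t_U += dt; }

// One leapfrog step of size dt at `level`; level 1 is the innermost here.
void step(int level, double dt, int nInner) {
  leapP(level, dt / 2);
  if (level == 1) leapU(dt);
  else for (int i = 0; i < nInner; ++i) step(level + 1, dt / nInner, nInner);
  leapP(level, dt / 2);
}

int main() {
  const double trajL = 1.0;
  const int MDsteps = 4, nInner = 5;
  for (int s = 0; s < MDsteps; ++s) step(0, trajL / MDsteps, nInner);

  for (int l = 0; l < (int)t_P.size(); ++l)
    assert(std::fabs(t_U - t_P[l]) < 1.0e-3);   // the commit's consistency check
  assert(std::fabs(t_U - trajL) < 1.0e-3);
}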

Have now verified <exp(-dH)> = 1 for nested integrators in Wilson/Wilson runs with both
Omelyan and Leapfrog, so substantial confidence gained.
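
For reference, the check behind that statement is Creutz's identity: for a
reversible, area-preserving MD integrator with a correct accept/reject step,
<exp(-dH)> = 1 exactly, so the average over measured dH values should be 1 within
statistics. A toy sketch of the measurement (made-up dH values, not results from
these runs):

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  // dH values as they might be logged at the end of each trajectory
  // (numbers invented for illustration).
  std::vector<double> dH = {0.012, -0.008, 0.003, -0.011, 0.006};

  double avg = 0.0;
  for (double x : dH) avg += std::exp(-x);
  avg /= dH.size();

  std::printf("<exp(-dH)> = %f (expect 1 within errors)\n", avg);
}
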
Author: Peter Boyle
Date:   2015-08-29 17:18:43 +01:00
parent  dc814f30da
commit  76d752585b
12 changed files with 241 additions and 107 deletions


@@ -42,7 +42,6 @@ namespace Grid{
Nexp(Nexp_),MDsteps(MDsteps_),trajL(trajL_),stepsize(trajL/MDsteps){};
};
namespace MDutils{
void generate_momenta(LatticeGaugeField&,GridParallelRNG&);
void generate_momenta_su3(LatticeGaugeField&,GridParallelRNG&);
@@ -52,23 +51,34 @@ namespace Grid{
template< class IntegratorAlgorithm >
class Integrator{
private:
+ int levels;
+ std::vector<double> t_P;
+ double t_U;
IntegratorParameters Params;
const ActionSet as;
std::unique_ptr<LatticeGaugeField> P;
GridParallelRNG pRNG;
//ObserverList observers; // not yet
IntegratorAlgorithm TheIntegrator;
void register_observers();
void notify_observers();
void update_P(LatticeGaugeField&U, int level,double ep){
+ t_P[level]+=ep;
for(int a=0; a<as[level].actions.size(); ++a){
LatticeGaugeField force(U._grid);
as[level].actions.at(a)->deriv(U,force);
*P -= force*ep;
}
+ std::cout<<GridLogMessage;
+ for(int l=0; l<level;++l) std::cout<<" ";
+ std::cout<<"["<<level<<"] P " << " dt "<< ep <<" : t_P "<< t_P[level] <<std::endl;
}
void update_U(LatticeGaugeField&U, double ep){
@@ -81,18 +91,34 @@ namespace Grid{
Umu = expMat(Pmu, ep, Params.Nexp)*Umu;
PokeIndex<LorentzIndex>(U, Umu, mu);
}
+ t_U+=ep;
+ int fl = levels-1;
+ std::cout<<GridLogMessage<<" ";
+ for(int l=0; l<fl;++l) std::cout<<" ";
+ std::cout<<"["<<fl<<"] U " << " dt "<< ep <<" : t_U "<< t_U <<std::endl;
}
friend void IntegratorAlgorithm::step (LatticeGaugeField& U,
int level, std::vector<int>& clock,
Integrator<IntegratorAlgorithm>* Integ);
public:
- Integrator(GridBase* grid, IntegratorParameters Par,
+ Integrator(GridBase* grid,
+            IntegratorParameters Par,
             ActionSet& Aset):
-   Params(Par),as(Aset),P(new LatticeGaugeField(grid)),pRNG(grid){
-   pRNG.SeedRandomDevice();
+   Params(Par),
+   as(Aset),
+   P(new LatticeGaugeField(grid)),
+   pRNG(grid),
+   levels(Aset.size())
+ {
+   std::vector<int> seeds({1,2,3,4,5});
+   pRNG.SeedFixedIntegers(seeds);
+   t_P.resize(levels,0.0);
+   t_U=0.0;
};
~Integrator(){}
@@ -120,14 +146,17 @@ namespace Grid{
Complex Hsum = sum(Hloc);
RealD H = Hsum.real();
+ RealD Hterm;
+ std::cout<<GridLogMessage << "Momentum action H_p = "<< H << "\n";
  // Actions
- for(int level=0; level<as.size(); ++level)
-   for(int actionID=0; actionID<as[level].actions.size(); ++actionID)
-     H += as[level].actions.at(actionID)->S(U);
+ for(int level=0; level<as.size(); ++level){
+   for(int actionID=0; actionID<as[level].actions.size(); ++actionID){
+     Hterm = as[level].actions.at(actionID)->S(U);
+     std::cout<<GridLogMessage << "Level "<<level<<" term "<<actionID<<" H = "<<Hterm<<std::endl;
+     H += Hterm;
+   }
+ }
std::cout<<GridLogMessage << "Total action H = "<< H << "\n";
return H;
@@ -136,17 +165,16 @@ namespace Grid{
void integrate(LatticeGaugeField& U){
std::vector<int> clock;
clock.resize(as.size(),0);
- for(int step=0; step< Params.MDsteps; ++step) // MD step
+ for(int step=0; step< Params.MDsteps; ++step){ // MD step
    TheIntegrator.step(U,0,clock, (this));
+ }
+ for(int level=0; level<as.size(); ++level){
+   assert(fabs(t_U - t_P[level])<1.0e-3);
+   std::cout<<GridLogMessage<<" times["<<level<<"]= "<<t_P[level]<< " " << t_U <<std::endl;
+ }
}
};
}
}
#endif//INTEGRATOR_INCLUDED