mirror of https://github.com/paboyle/Grid.git synced 2025-04-18 09:45:55 +01:00

Hadrons: meson fields indentation fix

Antonin Portelli 2018-08-06 12:42:25 +01:00
parent 3f0f92cda6
commit 4eac4e575e


@@ -160,7 +160,8 @@ void TA2AMesonField<FImpl>::MesonField(Eigen::Tensor<ComplexD,5> &mat,
     int MFlvol = ld*Lblock*Rblock*Nmom;
     Vector<SpinMatrix_v > lvSum(MFrvol);
-    parallel_for (int r = 0; r < MFrvol; r++){
+    parallel_for (int r = 0; r < MFrvol; r++)
+    {
         lvSum[r] = zero;
     }
@@ -176,113 +177,116 @@ void TA2AMesonField<FImpl>::MesonField(Eigen::Tensor<ComplexD,5> &mat,
     t0-=usecond();
     // Nested parallelism would be ok
     // Wasting cores here. Test case r
-    parallel_for(int r=0;r<rd;r++){
+    parallel_for(int r=0;r<rd;r++)
+    {
         int so=r*grid->_ostride[orthogdim]; // base offset for start of plane
-        for(int n=0;n<e1;n++){
-        for(int b=0;b<e2;b++){
+        for(int n=0;n<e1;n++)
+        for(int b=0;b<e2;b++)
+        {
             int ss= so+n*stride+b;
-            for(int i=0;i<Lblock;i++){
+            for(int i=0;i<Lblock;i++)
+            {
                 auto left = conjugate(lhs_wi[i]._odata[ss]);
-                for(int j=0;j<Rblock;j++){
+                for(int j=0;j<Rblock;j++)
+                {
                     SpinMatrix_v vv;
                     auto right = rhs_vj[j]._odata[ss];
-                    for(int s1=0;s1<Ns;s1++){
-                    for(int s2=0;s2<Ns;s2++){
+                    for(int s1=0;s1<Ns;s1++)
+                    for(int s2=0;s2<Ns;s2++)
+                    {
                         vv()(s1,s2)() = left()(s2)(0) * right()(s1)(0)
                                       + left()(s2)(1) * right()(s1)(1)
                                       + left()(s2)(2) * right()(s1)(2);
-                    }}
+                    }
                     // After getting the sitewise product do the mom phase loop
                     int base = Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*r;
-                    for ( int m=0;m<Nmom;m++){
+                    for ( int m=0;m<Nmom;m++)
+                    {
                         int idx = m+base;
                         auto phase = mom[m]._odata[ss];
                         mac(&lvSum[idx],&vv,&phase);
                     }
-                }
                 }
             }
         }
     }
     t0+=usecond();
     // Sum across simd lanes in the plane, breaking out orthog dir.
     t1-=usecond();
-    parallel_for(int rt=0;rt<rd;rt++){
+    parallel_for(int rt=0;rt<rd;rt++)
+    {
         std::vector<int> icoor(Nd);
         std::vector<SpinMatrix_s> extracted(Nsimd);
-        for(int i=0;i<Lblock;i++){
-        for(int j=0;j<Rblock;j++){
-        for(int m=0;m<Nmom;m++){
+        for(int i=0;i<Lblock;i++)
+        for(int j=0;j<Rblock;j++)
+        for(int m=0;m<Nmom;m++)
+        {
             int ij_rdx = m+Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*rt;
             extract(lvSum[ij_rdx],extracted);
-            for(int idx=0;idx<Nsimd;idx++){
+            for(int idx=0;idx<Nsimd;idx++)
+            {
                 grid->iCoorFromIindex(icoor,idx);
                 int ldx = rt+icoor[orthogdim]*rd;
                 int ij_ldx = m+Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*ldx;
                 lsSum[ij_ldx]=lsSum[ij_ldx]+extracted[idx];
            }
-        }}}
+        }
     }
     t1+=usecond();
     assert(mat.dimension(0) == Nmom);
     assert(mat.dimension(1) == Ngamma);
     assert(mat.dimension(2) == Nt);
     t2-=usecond();
     // ld loop and local only??
     int pd = grid->_processors[orthogdim];
     int pc = grid->_processor_coor[orthogdim];
     parallel_for_nest2(int lt=0;lt<ld;lt++)
     {
-        for(int pt=0;pt<pd;pt++){
+        for(int pt=0;pt<pd;pt++)
+        {
             int t = lt + pt*ld;
-            if (pt == pc){
-                for(int i=0;i<Lblock;i++){
-                for(int j=0;j<Rblock;j++){
-                for(int m=0;m<Nmom;m++){
+            if (pt == pc)
+            {
+                for(int i=0;i<Lblock;i++)
+                for(int j=0;j<Rblock;j++)
+                for(int m=0;m<Nmom;m++)
+                {
                     int ij_dx = m+Nmom*i + Nmom*Lblock * j + Nmom*Lblock * Rblock * lt;
-                    for(int mu=0;mu<Ngamma;mu++){
+                    for(int mu=0;mu<Ngamma;mu++)
+                    {
                         // this is a bit slow
                         mat(m,mu,t,i,j) = trace(lsSum[ij_dx]*Gamma(gammas[mu]));
                     }
                 }
             }
-            }
-            } else {
+            else
+            {
                 const scalar_type zz(0.0);
-                for(int i=0;i<Lblock;i++){
-                for(int j=0;j<Rblock;j++){
-                for(int mu=0;mu<Ngamma;mu++){
-                for(int m=0;m<Nmom;m++){
+                for(int i=0;i<Lblock;i++)
+                for(int j=0;j<Rblock;j++)
+                for(int mu=0;mu<Ngamma;mu++)
+                for(int m=0;m<Nmom;m++)
+                {
                     mat(m,mu,t,i,j) =zz;
                 }
-                }
-                }
-                }
             }
         }
     }
     t2+=usecond();
     ////////////////////////////////////////////////////////////////////
     // This global sum is taking as much as 50% of time on 16 nodes
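Note on the kernel above: lvSum and lsSum are addressed through a flattened four-component index, idx = m + Nmom*i + Nmom*Lblock*j + Nmom*Lblock*Rblock*r, with the momentum index fastest and the orthogonal-direction coordinate slowest. Below is a minimal standalone sketch of that linearisation; it is not part of the commit, and flatten4 and the block sizes are purely illustrative.

// Illustrative only: the same linearisation as in the kernel above,
// written as a helper: (m,i,j,r) -> m + Nmom*(i + Lblock*(j + Rblock*r)).
#include <cassert>

static int flatten4(int m, int i, int j, int r,
                    int Nmom, int Lblock, int Rblock)
{
    return m + Nmom*i + Nmom*Lblock*j + Nmom*Lblock*Rblock*r;
}

int main(void)
{
    const int Nmom = 4, Lblock = 8, Rblock = 8, rd = 3; // illustrative sizes

    // With m fastest and r slowest, the indices tile [0, Nmom*Lblock*Rblock*rd)
    // exactly once, which is what allows lvSum to be a flat buffer.
    int expected = 0;
    for (int r = 0; r < rd; ++r)
    for (int j = 0; j < Rblock; ++j)
    for (int i = 0; i < Lblock; ++i)
    for (int m = 0; m < Nmom; ++m)
        assert(flatten4(m, i, j, r, Nmom, Lblock, Rblock) == expected++);

    return 0;
}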
@@ -344,7 +348,8 @@ void TA2AMesonField<FImpl>::execute(void)
     ///////////////////////////////////////////////
     GridBase *grid = env().getGrid(1);
     std::vector<LatticeComplex> phases(nmom,grid);
-    for(int m=0;m<nmom;m++){
+    for(int m=0;m<nmom;m++)
+    {
         phases[m] = Complex(1.0); // All zero momentum for now
     }
@@ -377,10 +382,10 @@ void TA2AMesonField<FImpl>::execute(void)
     double t0 = usecond();
     int N_i = N;
     int N_j = N;
-    for(int i=0;i<N_i;i+=schurBlock){ //loop over SchurBlocking to suppress 5D matrix overhead
-    for(int j=0;j<N_j;j+=schurBlock){
+    for(int i=0;i<N_i;i+=schurBlock) //loop over SchurBlocking to suppress 5D matrix overhead
+    for(int j=0;j<N_j;j+=schurBlock)
+    {
         ///////////////////////////////////////////////////////////////
         // Get the W and V vectors for this schurBlock^2 set of terms
         ///////////////////////////////////////////////////////////////
@@ -398,9 +403,9 @@ void TA2AMesonField<FImpl>::execute(void)
         ///////////////////////////////////////////////////////////////
         // Series of cache blocked chunks of the contractions within this SchurBlock
         ///////////////////////////////////////////////////////////////
-        for(int ii=0;ii<N_ii;ii+=cacheBlock){
-        for(int jj=0;jj<N_jj;jj+=cacheBlock){
+        for(int ii=0;ii<N_ii;ii+=cacheBlock)
+        for(int jj=0;jj<N_jj;jj+=cacheBlock)
+        {
             int N_iii = MIN(N_ii-ii,cacheBlock);
             int N_jjj = MIN(N_jj-jj,cacheBlock);
             Eigen::Tensor<ComplexD,5> mesonFieldBlocked(nmom,ngamma,nt,N_iii,N_jjj);
@@ -417,18 +422,16 @@ void TA2AMesonField<FImpl>::execute(void)
             ///////////////////////////////////////////////////////////////
             // Copy back to full meson field tensor
             ///////////////////////////////////////////////////////////////
-            parallel_for_nest2(int iii=0;iii< N_iii;iii++) {
-            for(int jjj=0;jjj< N_jjj;jjj++) {
-            for(int m =0;m< nmom;m++) {
-            for(int g =0;g< ngamma;g++) {
-            for(int t =0;t< nt;t++) {
+            parallel_for_nest2(int iii=0;iii< N_iii;iii++)
+            for(int jjj=0;jjj< N_jjj;jjj++)
+            for(int m =0;m< nmom;m++)
+            for(int g =0;g< ngamma;g++)
+            for(int t =0;t< nt;t++)
+            {
                 mesonField(m,g,t,i+ii+iii,j+jj+jjj) = mesonFieldBlocked(m,g,t,iii,jjj);
-            }}}
-            }}
-            }}
-        }}
+            }
+        }
+    }
     double nodes=grid->NodeCount();
     double t1 = usecond();
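The schurBlock and cacheBlock loops above implement a two-level blocking of the (i,j) mode pairs, with MIN clipping the tail blocks and the copy-back addressing the full tensor through the global indices i+ii+iii and j+jj+jjj. A minimal standalone sketch of the same loop structure follows; it is not from the Grid sources, N/schurBlock/cacheBlock are illustrative sizes, and std::min stands in for the MIN macro. It simply checks that every (i,j) pair is visited exactly once.

// Illustrative only: the two-level (schurBlock, cacheBlock) blocking pattern
// used in execute(), with std::min in place of the MIN macro.
#include <algorithm>
#include <cassert>
#include <vector>

int main(void)
{
    const int N = 50, schurBlock = 12, cacheBlock = 5;   // illustrative sizes
    std::vector<int> visits(N * N, 0);

    for (int i = 0; i < N; i += schurBlock)
    for (int j = 0; j < N; j += schurBlock)
    {
        int N_ii = std::min(N - i, schurBlock);
        int N_jj = std::min(N - j, schurBlock);

        for (int ii = 0; ii < N_ii; ii += cacheBlock)
        for (int jj = 0; jj < N_jj; jj += cacheBlock)
        {
            int N_iii = std::min(N_ii - ii, cacheBlock);
            int N_jjj = std::min(N_jj - jj, cacheBlock);

            // "Contract" the cache block, then copy back with the same
            // i+ii+iii, j+jj+jjj global indices as the code above.
            for (int iii = 0; iii < N_iii; ++iii)
            for (int jjj = 0; jjj < N_jjj; ++jjj)
                visits[(i + ii + iii) * N + (j + jj + jjj)] += 1;
        }
    }

    // Tail blocks are clipped, so every (i,j) pair is covered exactly once.
    for (int v : visits) assert(v == 1);
    return 0;
}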
@@ -450,21 +453,22 @@ void TA2AMesonField<FImpl>::execute(void)
     /////////////////////////////////////////////////////////////////////////
     std::vector<ComplexD> corr(nt,ComplexD(0.0));
-    for(int i=0;i<N;i++){
-    for(int j=0;j<N;j++){
+    for(int i=0;i<N;i++)
+    for(int j=0;j<N;j++)
+    {
         int m=0; // first momentum
         int g=0; // first gamma in above ordering is gamma5 for pion
-        for(int t0=0;t0<nt;t0++){
-        for(int t=0;t<nt;t++){
+        for(int t0=0;t0<nt;t0++)
+        for(int t=0;t<nt;t++)
+        {
             int tt = (t0+t)%nt;
             corr[t] += mesonField(m,g,t0,i,j)* mesonField(m,g,tt,j,i);
-        }}
-    }}
+        }
+    }
     for(int t=0;t<nt;t++) corr[t] = corr[t]/ (double)nt;
     for(int t=0;t<nt;t++) LOG(Message) << " " << t << " " << corr[t]<<std::endl;
-    // saveResult(par().output, "meson", result);
 }
 END_MODULE_NAMESPACE
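The test contraction in the last hunk forms a source-time-averaged trace, C(t) = (1/nt) * sum over t0, i, j of mesonField(0,0,t0,i,j) * mesonField(0,0,(t0+t)%nt,j,i). A minimal standalone sketch of that reduction follows; it is not from the Grid sources, and the randomly filled array merely stands in for the real meson field.

// Illustrative only: the pion-style test contraction from the last hunk,
// C(t) = (1/nt) * sum_{t0,i,j} M(t0)(i,j) * M((t0+t)%nt)(j,i),
// applied to a randomly filled stand-in for mesonField(m=0, g=0, t, i, j).
#include <complex>
#include <iostream>
#include <random>
#include <vector>

int main(void)
{
    const int nt = 8, N = 4;                       // illustrative sizes
    using Cplx = std::complex<double>;

    // mf[t][i*N+j] stands in for mesonField(0,0,t,i,j).
    std::mt19937 rng(42);
    std::uniform_real_distribution<double> dist(-1.0, 1.0);
    std::vector<std::vector<Cplx>> mf(nt, std::vector<Cplx>(N * N));
    for (auto &slice : mf)
        for (auto &x : slice) x = Cplx(dist(rng), dist(rng));

    std::vector<Cplx> corr(nt, Cplx(0.0));
    for (int i = 0; i < N; ++i)
    for (int j = 0; j < N; ++j)
    for (int t0 = 0; t0 < nt; ++t0)
    for (int t = 0; t < nt; ++t)
    {
        int tt = (t0 + t) % nt;
        corr[t] += mf[t0][i * N + j] * mf[tt][j * N + i];
    }

    // Average over the nt source times and print, as the module's test does.
    for (int t = 0; t < nt; ++t) corr[t] /= double(nt);
    for (int t = 0; t < nt; ++t)
        std::cout << t << " " << corr[t] << std::endl;
    return 0;
}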