mirror of https://github.com/paboyle/Grid.git synced 2024-11-10 07:55:35 +00:00

Hadrons: meson fields indentation fix

Antonin Portelli 2018-08-06 12:42:25 +01:00
parent 3f0f92cda6
commit 4eac4e575e
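The change is purely stylistic: as the diff below shows, loop headers lose their trailing brace and tightly nested loops share a single Allman-style block. A minimal standalone illustration of the target style (placeholder loop bounds and body, not Grid code):

#include <cstdio>

static void body(int i, int j) { std::printf("%d %d\n", i, j); }

int main(void)
{
    const int Ni = 2, Nj = 2;   // placeholder bounds
    // old style in this file:  for(int i=0;i<Ni;i++){ for(int j=0;j<Nj;j++){ body(i,j); }}
    // new style applied here:  braces dropped from the loop headers, one block for the nest
    for (int i = 0; i < Ni; i++)
    for (int j = 0; j < Nj; j++)
    {
        body(i, j);
    }
    return 0;
}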

@@ -160,7 +160,8 @@ void TA2AMesonField<FImpl>::MesonField(Eigen::Tensor<ComplexD,5> &mat,
int MFlvol = ld*Lblock*Rblock*Nmom;
Vector<SpinMatrix_v > lvSum(MFrvol);
-parallel_for (int r = 0; r < MFrvol; r++){
+parallel_for (int r = 0; r < MFrvol; r++)
+{
lvSum[r] = zero;
}
@@ -176,113 +177,116 @@ void TA2AMesonField<FImpl>::MesonField(Eigen::Tensor<ComplexD,5> &mat,
t0-=usecond();
// Nested parallelism would be ok
// Wasting cores here. Test case r
-parallel_for(int r=0;r<rd;r++){
+parallel_for(int r=0;r<rd;r++)
+{
int so=r*grid->_ostride[orthogdim]; // base offset for start of plane
-for(int n=0;n<e1;n++){
-for(int b=0;b<e2;b++){
+for(int n=0;n<e1;n++)
+for(int b=0;b<e2;b++)
+{
int ss= so+n*stride+b;
-for(int i=0;i<Lblock;i++){
+for(int i=0;i<Lblock;i++)
+{
auto left = conjugate(lhs_wi[i]._odata[ss]);
-for(int j=0;j<Rblock;j++){
+for(int j=0;j<Rblock;j++)
+{
SpinMatrix_v vv;
auto right = rhs_vj[j]._odata[ss];
-for(int s1=0;s1<Ns;s1++){
-for(int s2=0;s2<Ns;s2++){
+for(int s1=0;s1<Ns;s1++)
+for(int s2=0;s2<Ns;s2++)
+{
vv()(s1,s2)() = left()(s2)(0) * right()(s1)(0)
+ left()(s2)(1) * right()(s1)(1)
+ left()(s2)(2) * right()(s1)(2);
-}}
+}
// After getting the sitewise product do the mom phase loop
int base = Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*r;
-for ( int m=0;m<Nmom;m++){
+for ( int m=0;m<Nmom;m++)
+{
int idx = m+base;
auto phase = mom[m]._odata[ss];
mac(&lvSum[idx],&vv,&phase);
}
}
}
}
}
}
t0+=usecond();
// Sum across simd lanes in the plane, breaking out orthog dir.
t1-=usecond();
-parallel_for(int rt=0;rt<rd;rt++){
+parallel_for(int rt=0;rt<rd;rt++)
+{
std::vector<int> icoor(Nd);
std::vector<SpinMatrix_s> extracted(Nsimd);
-for(int i=0;i<Lblock;i++){
-for(int j=0;j<Rblock;j++){
-for(int m=0;m<Nmom;m++){
+for(int i=0;i<Lblock;i++)
+for(int j=0;j<Rblock;j++)
+for(int m=0;m<Nmom;m++)
+{
int ij_rdx = m+Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*rt;
extract(lvSum[ij_rdx],extracted);
-for(int idx=0;idx<Nsimd;idx++){
+for(int idx=0;idx<Nsimd;idx++)
+{
grid->iCoorFromIindex(icoor,idx);
int ldx = rt+icoor[orthogdim]*rd;
int ij_ldx = m+Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*ldx;
lsSum[ij_ldx]=lsSum[ij_ldx]+extracted[idx];
}
-}}}
+}
}
t1+=usecond();
assert(mat.dimension(0) == Nmom);
assert(mat.dimension(1) == Ngamma);
assert(mat.dimension(2) == Nt);
t2-=usecond();
// ld loop and local only??
int pd = grid->_processors[orthogdim];
int pc = grid->_processor_coor[orthogdim];
parallel_for_nest2(int lt=0;lt<ld;lt++)
{
-for(int pt=0;pt<pd;pt++){
+for(int pt=0;pt<pd;pt++)
+{
int t = lt + pt*ld;
-if (pt == pc){
-for(int i=0;i<Lblock;i++){
-for(int j=0;j<Rblock;j++){
-for(int m=0;m<Nmom;m++){
+if (pt == pc)
+{
+for(int i=0;i<Lblock;i++)
+for(int j=0;j<Rblock;j++)
+for(int m=0;m<Nmom;m++)
+{
int ij_dx = m+Nmom*i + Nmom*Lblock * j + Nmom*Lblock * Rblock * lt;
-for(int mu=0;mu<Ngamma;mu++){
+for(int mu=0;mu<Ngamma;mu++)
+{
// this is a bit slow
mat(m,mu,t,i,j) = trace(lsSum[ij_dx]*Gamma(gammas[mu]));
}
}
}
}
-} else {
+else
+{
const scalar_type zz(0.0);
-for(int i=0;i<Lblock;i++){
-for(int j=0;j<Rblock;j++){
-for(int mu=0;mu<Ngamma;mu++){
-for(int m=0;m<Nmom;m++){
+for(int i=0;i<Lblock;i++)
+for(int j=0;j<Rblock;j++)
+for(int mu=0;mu<Ngamma;mu++)
+for(int m=0;m<Nmom;m++)
+{
mat(m,mu,t,i,j) =zz;
}
}
}
}
}
}
}
t2+=usecond();
////////////////////////////////////////////////////////////////////
// This global sum is taking as much as 50% of time on 16 nodes
@@ -344,7 +348,8 @@ void TA2AMesonField<FImpl>::execute(void)
///////////////////////////////////////////////
GridBase *grid = env().getGrid(1);
std::vector<LatticeComplex> phases(nmom,grid);
-for(int m=0;m<nmom;m++){
+for(int m=0;m<nmom;m++)
+{
phases[m] = Complex(1.0); // All zero momentum for now
}
@@ -377,10 +382,10 @@ void TA2AMesonField<FImpl>::execute(void)
double t0 = usecond();
int N_i = N;
int N_j = N;
-for(int i=0;i<N_i;i+=schurBlock){ //loop over SchurBlocking to suppress 5D matrix overhead
-for(int j=0;j<N_j;j+=schurBlock){
+for(int i=0;i<N_i;i+=schurBlock) //loop over SchurBlocking to suppress 5D matrix overhead
+for(int j=0;j<N_j;j+=schurBlock)
+{
///////////////////////////////////////////////////////////////
// Get the W and V vectors for this schurBlock^2 set of terms
///////////////////////////////////////////////////////////////
@@ -398,9 +403,9 @@ void TA2AMesonField<FImpl>::execute(void)
///////////////////////////////////////////////////////////////
// Series of cache blocked chunks of the contractions within this SchurBlock
///////////////////////////////////////////////////////////////
-for(int ii=0;ii<N_ii;ii+=cacheBlock){
-for(int jj=0;jj<N_jj;jj+=cacheBlock){
+for(int ii=0;ii<N_ii;ii+=cacheBlock)
+for(int jj=0;jj<N_jj;jj+=cacheBlock)
+{
int N_iii = MIN(N_ii-ii,cacheBlock);
int N_jjj = MIN(N_jj-jj,cacheBlock);
Eigen::Tensor<ComplexD,5> mesonFieldBlocked(nmom,ngamma,nt,N_iii,N_jjj);
@@ -417,18 +422,16 @@ void TA2AMesonField<FImpl>::execute(void)
///////////////////////////////////////////////////////////////
// Copy back to full meson field tensor
///////////////////////////////////////////////////////////////
-parallel_for_nest2(int iii=0;iii< N_iii;iii++) {
-for(int jjj=0;jjj< N_jjj;jjj++) {
-for(int m =0;m< nmom;m++) {
-for(int g =0;g< ngamma;g++) {
-for(int t =0;t< nt;t++) {
+parallel_for_nest2(int iii=0;iii< N_iii;iii++)
+for(int jjj=0;jjj< N_jjj;jjj++)
+for(int m =0;m< nmom;m++)
+for(int g =0;g< ngamma;g++)
+for(int t =0;t< nt;t++)
+{
mesonField(m,g,t,i+ii+iii,j+jj+jjj) = mesonFieldBlocked(m,g,t,iii,jjj);
-}}}
-}}
-}}
-}}
+}
+}
+}
double nodes=grid->NodeCount();
double t1 = usecond();
@@ -450,21 +453,22 @@ void TA2AMesonField<FImpl>::execute(void)
/////////////////////////////////////////////////////////////////////////
std::vector<ComplexD> corr(nt,ComplexD(0.0));
-for(int i=0;i<N;i++){
-for(int j=0;j<N;j++){
+for(int i=0;i<N;i++)
+for(int j=0;j<N;j++)
+{
int m=0; // first momentum
int g=0; // first gamma in above ordering is gamma5 for pion
-for(int t0=0;t0<nt;t0++){
-for(int t=0;t<nt;t++){
+for(int t0=0;t0<nt;t0++)
+for(int t=0;t<nt;t++)
+{
int tt = (t0+t)%nt;
corr[t] += mesonField(m,g,t0,i,j)* mesonField(m,g,tt,j,i);
-}}
-}}
+}
+}
for(int t=0;t<nt;t++) corr[t] = corr[t]/ (double)nt;
for(int t=0;t<nt;t++) LOG(Message) << " " << t << " " << corr[t]<<std::endl;
// saveResult(par().output, "meson", result);
}
END_MODULE_NAMESPACE
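
For context on the loop structure re-indented in execute() above: the contraction is blocked at two levels, an outer schurBlock over the i/j mode indices and an inner cacheBlock tile whose result is copied back into the full meson-field tensor. A minimal standalone sketch of that nesting, using placeholder names and sizes and plain arrays in place of the Eigen tensors (not Grid code):

#include <algorithm>
#include <cstdio>
#include <vector>

int main(void)
{
    const int N = 8, schurBlock = 4, cacheBlock = 2;   // placeholder sizes
    std::vector<double> full(N * N, 0.0);              // stands in for the full meson field

    for (int i = 0; i < N; i += schurBlock)            // outer blocking (schurBlock)
    for (int j = 0; j < N; j += schurBlock)
    {
        int N_ii = std::min(N - i, schurBlock);
        int N_jj = std::min(N - j, schurBlock);
        for (int ii = 0; ii < N_ii; ii += cacheBlock)  // inner blocking (cacheBlock)
        for (int jj = 0; jj < N_jj; jj += cacheBlock)
        {
            int N_iii = std::min(N_ii - ii, cacheBlock);
            int N_jjj = std::min(N_jj - jj, cacheBlock);
            std::vector<double> tile(N_iii * N_jjj);   // stands in for mesonFieldBlocked

            for (int a = 0; a < N_iii; a++)            // dummy "contraction" filling the tile
            for (int b = 0; b < N_jjj; b++)
            {
                tile[a * N_jjj + b] = (i + ii + a) + 0.1 * (j + jj + b);
            }

            for (int a = 0; a < N_iii; a++)            // copy the tile back into the full result
            for (int b = 0; b < N_jjj; b++)
            {
                full[(i + ii + a) * N + (j + jj + b)] = tile[a * N_jjj + b];
            }
        }
    }
    std::printf("full[0]=%g full[N*N-1]=%g\n", full[0], full[N * N - 1]);
    return 0;
}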