mirror of https://github.com/paboyle/Grid.git synced 2025-04-24 12:45:56 +01:00

Checking in before trying to reduce memory footprint

Chulwoo Jung 2020-08-08 22:11:14 -04:00
parent 43298ef681
commit f91e3af97f
6 changed files with 47 additions and 29 deletions

View File

@@ -240,12 +240,14 @@ public:
 Field T0(grid); T0 = in;
 Field T1(grid);
 Field T2(grid);
+Field Tout(grid);
 Field y(grid);
 Field *Tnm = &T0;
 Field *Tn = &T1;
 Field *Tnp = &T2;
+std::cout << GridLogMessage << "Chebyshev() starts"<<std::endl;
 // Tn=T1 = (xscale M + mscale)in
 RealD xscale = 2.0/(hi-lo);
 RealD mscale = -(hi+lo)/(hi-lo);
@@ -254,7 +256,7 @@ public:
 // sum = .5 c[0] T0 + c[1] T1
 // out = ()*T0 + Coeffs[1]*T1;
-axpby(out,0.5*Coeffs[0],Coeffs[1],T0,T1);
+axpby(Tout,0.5*Coeffs[0],Coeffs[1],T0,T1);
 for(int n=2;n<order;n++){
 Linop.HermOp(*Tn,y);
@@ -275,7 +277,7 @@ public:
 axpby(y,xscale,mscale,y,(*Tn));
 axpby(*Tnp,2.0,-1.0,y,(*Tnm));
 if ( Coeffs[n] != 0.0) {
-axpy(out,Coeffs[n],*Tnp,out);
+axpy(Tout,Coeffs[n],*Tnp,Tout);
 }
 #endif
 // Cycle pointers to avoid copies
@@ -285,6 +287,8 @@ public:
 Tnp =swizzle;
 }
+out = Tout;
+std::cout << GridLogMessage << "Chebyshev() ends"<<std::endl;
 }
 };
@@ -377,24 +381,26 @@ public:
 Field T0(grid); T0 = in;
 Field T1(grid);
 Field T2(grid);
+Field Tout(grid);
 Field y(grid);
 Field *Tnm = &T0;
 Field *Tn = &T1;
 Field *Tnp = &T2;
+std::cout << GridLogMessage << "ChebyshevLanczos() starts"<<std::endl;
 // Tn=T1 = (xscale M )*in
 AminusMuSq(Linop,T0,T1);
 // sum = .5 c[0] T0 + c[1] T1
-out = (0.5*Coeffs[0])*T0 + Coeffs[1]*T1;
+Tout = (0.5*Coeffs[0])*T0 + Coeffs[1]*T1;
 for(int n=2;n<order;n++){
 AminusMuSq(Linop,*Tn,y);
 *Tnp=2.0*y-(*Tnm);
-out=out+Coeffs[n]* (*Tnp);
+Tout=Tout+Coeffs[n]* (*Tnp);
 // Cycle pointers to avoid copies
 Field *swizzle = Tnm;
@@ -403,6 +409,8 @@ public:
 Tnp =swizzle;
 }
+out=Tout;
+std::cout << GridLogMessage << "ChebyshevLanczos() ends"<<std::endl;
 }
 };
 NAMESPACE_END(Grid);
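
Both routines evaluate the Chebyshev three-term recurrence T_{n+1} = 2 x T_n - T_{n-1}; the change accumulates the sum in a scratch field Tout and assigns it to out only once the loop has finished, so the output field is untouched until the full sum is formed. A minimal scalar sketch of the same evaluation, assuming a plain double in place of Field and a point x in place of the scaled operator (hypothetical names, not the Grid API):

    #include <cstdio>
    #include <vector>

    // Scalar sketch of the Chebyshev evaluation in the hunks above:
    // accumulate into a local 'Tout', hand it back only at the end.
    double chebyshevEval(const std::vector<double> &Coeffs, double x) {
      double Tnm = 1.0;                                // T0 = in
      double Tn  = x;                                  // T1 = (xscale M + mscale) in
      double Tout = 0.5*Coeffs[0]*Tnm + Coeffs[1]*Tn;  // sum = .5 c[0] T0 + c[1] T1
      for (size_t n = 2; n < Coeffs.size(); ++n) {
        double Tnp = 2.0*x*Tn - Tnm;                   // T_{n+1} = 2 x T_n - T_{n-1}
        if (Coeffs[n] != 0.0) Tout += Coeffs[n]*Tnp;
        Tnm = Tn; Tn = Tnp;                            // cycle, like the pointer swizzle
      }
      return Tout;                                     // 'out = Tout' happens once, here
    }

    int main() {
      std::printf("%f\n", chebyshevEval({1.0, 0.0, -0.5}, 0.3));  // toy coefficients
    }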

View File

@@ -130,7 +130,6 @@ private:
 int Nconv_test_interval; // Number of skipped vectors when checking a convergence
 RealD eresid;
 IRBLdiagonalisation diagonalisation;
-int split_test; //test split in the first iteration
 ////////////////////////////////////
 // Embedded objects
 ////////////////////////////////////
@@ -154,6 +153,7 @@ private:
 // Constructor
 /////////////////////////
 public:
+int split_test; //test split in the first iteration
 ImplicitlyRestartedBlockLanczos(LinearOperatorBase<Field> &Linop, // op
 LinearOperatorBase<Field> &SLinop, // op
 GridRedBlackCartesian * FrbGrid,
@@ -262,8 +262,8 @@ public:
 int Nbatch = R/Nevec_acc;
 assert( R%Nevec_acc == 0 );
-Glog << "nBatch, Nevec_acc, R, Nu = "
-     << Nbatch << "," << Nevec_acc << "," << R << "," << Nu << std::endl;
+// Glog << "nBatch, Nevec_acc, R, Nu = "
+//      << Nbatch << "," << Nevec_acc << "," << R << "," << Nu << std::endl;
 #if 0 // a trivial test
 for (int col=0; col<Nu; ++col) {
@@ -439,19 +439,18 @@ for( int i =0;i<total;i++){
 GridBase *grid = src[0].Grid();
 grid->show_decomposition();
-printf("GRID_CUDA\n");
+// printf("GRID_CUDA\n");
 // set eigenvector buffers for the cuBLAS calls
 //const uint64_t nsimd = grid->Nsimd();
 const uint64_t sites = grid->lSites();
 cudaStat = cudaMallocManaged((void **)&w_acc, Nu*sites*12*sizeof(CUDA_COMPLEX));
-Glog << "w_acc= "<<w_acc << " "<< cudaStat << std::endl;
+// Glog << "w_acc= "<<w_acc << " "<< cudaStat << std::endl;
 cudaStat = cudaMallocManaged((void **)&evec_acc, Nevec_acc*sites*12*sizeof(CUDA_COMPLEX));
-Glog << "evec_acc= "<<evec_acc << " "<< cudaStat << std::endl;
+// Glog << "evec_acc= "<<evec_acc << " "<< cudaStat << std::endl;
 cudaStat = cudaMallocManaged((void **)&c_acc, Nu*Nevec_acc*sizeof(CUDA_COMPLEX));
-Glog << "c_acc= "<<c_acc << " "<< cudaStat << std::endl;
-// exit(-42);
+// Glog << "c_acc= "<<c_acc << " "<< cudaStat << std::endl;
 #endif
 switch (Impl) {
 case LanczosType::irbl:
@@ -687,6 +686,7 @@ for( int i =0;i<total;i++){
 int Np = (Nm-Nk);
 if (Np > 0 && MaxIter > 1) Np /= MaxIter;
 int Nblock_p = Np/Nu;
+for(int i=0;i<evec.size();i++) evec[i].Advise()=AdviseInfrequentUse;
 Glog << std::string(74,'*') << std::endl;
 Glog << fname + " starting iteration 0 / "<< MaxIter<< std::endl;
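
The new Advise() loop marks the eigenvector array, usually the largest allocation in the run, as cold data. On CUDA managed memory, advice of this kind plausibly maps onto cudaMemAdvise; a hedged sketch of one such mapping, assuming cudaMallocManaged buffers (the actual translation inside Grid's MemoryManager is not shown in this diff):

    #include <cstdio>
    #include <cuda_runtime.h>

    // Hedged sketch: hint that a managed buffer is rarely touched by the GPU,
    // so the driver prefers to keep it on the host and page it in on demand.
    // One plausible backend for AdviseInfrequentUse, not necessarily Grid's.
    void adviseInfrequentUse(void *ptr, size_t bytes) {
      cudaError_t err = cudaMemAdvise(ptr, bytes, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId);
      if (err != cudaSuccess)
        std::printf("cudaMemAdvise failed: %s\n", cudaGetErrorString(err));
    }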
@@ -879,10 +879,10 @@ private:
 assert((Nu%mrhs)==0);
 std::vector<Field> in(mrhs,f_grid);
 Field s_in(sf_grid);
 Field s_out(sf_grid);
+// unnecessary copy. Can or should it be avoided?
 int k_start = 0;
 while ( k_start < Nu) {
 Glog << "k_start= "<<k_start<< std::endl;
 for (int u=0; u<mrhs; ++u) in[u] = evec[L+k_start+u];
@@ -899,18 +899,18 @@ Glog << "Unsplit done "<< std::endl;
 Glog << "Using split grid done "<< std::endl;
 // test split in the first iteration
-if(!split_test){
-Glog << "Not using split grid"<< std::endl;
+if(split_test){
+Glog << "Split grid testing "<< std::endl;
 // 3. wk:=Avk-βkv_{k-1}
 for (int k=L, u=0; k<R; ++k, ++u) {
 _poly(_Linop,evec[k],w_copy[u]);
 }
-Glog << "Not using split grid done"<< std::endl;
 for (int u=0; u<Nu; ++u) {
 w_copy[u] -= w[u];
 Glog << "diff(split - non_split) "<<u<<" " << norm2(w_copy[u]) << std::endl;
+Glog << "Split grid testing done"<< std::endl;
 }
-split_test=1;
+split_test=0;
 }
 Glog << "Poly done"<< std::endl;
 Glog << "LinAlg "<< std::endl;
@@ -960,11 +960,12 @@ if(!split_test){
 }
 }
-Glog << "Gram Schmidt"<< std::endl;
 // re-orthogonalization for numerical stability
 #if 0
+Glog << "Gram Schmidt"<< std::endl;
 orthogonalize(w,Nu,evec,R);
 #else
+Glog << "Gram Schmidt using cublas"<< std::endl;
 orthogonalize_blas(w,evec,R);
 #endif
 // QR part
@@ -984,12 +985,10 @@ if(!split_test){
 //lme[0][L] = beta;
 for (int u=0; u<Nu; ++u) {
-Glog << "norm2(w[" << u << "])= "<< norm2(w[u]) << std::endl;
+// Glog << "norm2(w[" << u << "])= "<< norm2(w[u]) << std::endl;
 assert (!isnan(norm2(w[u])));
 for (int k=L+u; k<R; ++k) {
-Glog <<" In block "<< b << ",";
-std::cout <<" beta[" << u << "," << k-L << "] = ";
-std::cout << lme[u][k] << std::endl;
+Glog <<" In block "<< b << "," <<" beta[" << u << "," << k-L << "] = " << lme[u][k] << std::endl;
 }
 }
 Glog << "LinAlg done "<< std::endl;

View File

@@ -44,7 +44,7 @@ void MemoryManager::AcceleratorFree (void *ptr,size_t bytes)
 if ( __freeme ) {
 acceleratorFreeDevice(__freeme);
 total_device-=bytes;
-// PrintBytes();
+PrintBytes();
 }
 }
 void *MemoryManager::SharedAllocate(size_t bytes)
@@ -53,8 +53,8 @@ void *MemoryManager::SharedAllocate(size_t bytes)
 if ( ptr == (void *) NULL ) {
 ptr = (void *) acceleratorAllocShared(bytes);
 total_shared+=bytes;
-// std::cout <<"AcceleratorAllocate: allocated Shared pointer "<<std::hex<<ptr<<std::dec<<std::endl;
-// PrintBytes();
+std::cout <<"AcceleratorAllocate: allocated Shared pointer "<<std::hex<<ptr<<std::dec<<std::endl;
+PrintBytes();
 }
 return ptr;
 }
@@ -74,6 +74,7 @@ void *MemoryManager::CpuAllocate(size_t bytes)
 if ( ptr == (void *) NULL ) {
 ptr = (void *) acceleratorAllocShared(bytes);
 total_host+=bytes;
+std::cout << GridLogMessage<< "MemoryManager:: CpuAllocate total_host= "<<total_host<<" "<< ptr << std::endl;
 }
 return ptr;
 }
@@ -83,6 +84,7 @@ void MemoryManager::CpuFree (void *_ptr,size_t bytes)
 void *__freeme = Insert(_ptr,bytes,Cpu);
 if ( __freeme ) {
 acceleratorFreeShared(__freeme);
+std::cout << GridLogMessage<< "MemoryManager:: CpuFree total_host= "<<total_host<<" "<< __freeme << std::endl;
 total_host-=bytes;
 }
 }
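
These hunks switch the byte accounting back on (PrintBytes and the allocate/free logs) so the running host and shared totals are visible while chasing the footprint. A minimal sketch of the counting pattern, with malloc/free standing in for the accelerator allocators and totalHost as a hypothetical counter:

    #include <cstdio>
    #include <cstdlib>

    // Sketch of the accounting in MemoryManager: every allocate/free adjusts
    // a running total and logs it, so peaks and leaks show up in the log.
    static size_t totalHost = 0;

    void *cpuAllocate(size_t bytes) {
      void *ptr = std::malloc(bytes);      // stands in for acceleratorAllocShared
      if (ptr != nullptr) {
        totalHost += bytes;
        std::printf("CpuAllocate total_host= %zu %p\n", totalHost, ptr);
      }
      return ptr;
    }

    void cpuFree(void *ptr, size_t bytes) {
      if (ptr != nullptr) {
        std::printf("CpuFree total_host= %zu %p\n", totalHost, ptr);  // logged before adjusting, as above
        totalHost -= bytes;
        std::free(ptr);                    // stands in for acceleratorFreeShared
      }
    }

    int main() {
      void *p = cpuAllocate(1024);
      cpuFree(p, 1024);
    }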

View File

@@ -196,7 +196,7 @@ public:
 std::cerr << " nersc_csum " <<std::hex<< nersc_csum << " " << header.checksum<< std::dec<< std::endl;
 exit(0);
 }
-assert(fabs(clone.plaquette -header.plaquette ) < 1.0e-5 );
+assert(fabs(clone.plaquette -header.plaquette ) < 1.0e-1 );
 assert(fabs(clone.link_trace-header.link_trace) < 1.0e-6 );
 assert(nersc_csum == header.checksum );

View File

@@ -149,6 +149,9 @@ inline void *acceleratorAllocShared(size_t bytes)
 ptr = (void *) NULL;
 printf(" cudaMallocManaged failed for %d %s \n",bytes,cudaGetErrorString(err));
 }
+size_t free,total;
+cudaMemGetInfo(&free,&total);
+std::cout<<"cudaMemGetInfo "<<free<<" / "<<total<<std::endl;
 return ptr;
 };
 inline void *acceleratorAllocDevice(size_t bytes)
@@ -159,6 +162,9 @@ inline void *acceleratorAllocDevice(size_t bytes)
 ptr = (void *) NULL;
 printf(" cudaMalloc failed for %d %s \n",bytes,cudaGetErrorString(err));
 }
+size_t free,total;
+cudaMemGetInfo(&free,&total);
+std::cout<<"cudaMemGetInfo "<<free<<" / "<<total<<std::endl;
 return ptr;
 };
 inline void acceleratorFreeShared(void *ptr){ cudaFree(ptr);};
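
cudaMemGetInfo returns the free and total device memory in bytes, so printing it after every allocation gives a running picture of device memory pressure. The same probe wrapped in a small helper (logDeviceMemory is a hypothetical name; the call itself is standard CUDA runtime API):

    #include <cstdio>
    #include <cuda_runtime.h>

    // Report device memory pressure, as the added cudaMemGetInfo prints do.
    // Call after each allocation to watch the free pool shrink.
    void logDeviceMemory(const char *where) {
      size_t freeBytes = 0, totalBytes = 0;
      if (cudaMemGetInfo(&freeBytes, &totalBytes) == cudaSuccess)
        std::printf("%s: cudaMemGetInfo %zu / %zu\n", where, freeBytes, totalBytes);
    }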

View File

@@ -359,12 +359,14 @@ int main (int argc, char ** argv)
 JP.MaxIter,
 IRBLdiagonaliseWithEigen);
 // IRBLdiagonaliseWithLAPACK);
+IRBL.split_test=1;
 std::vector<RealD> eval(JP.Nm);
 std::vector<FermionField> src(JP.Nu,FrbGrid);
 if (0)
 {
+// in case RNG is too slow
 std::cout << GridLogMessage << "Using RNG5"<<std::endl;
 FermionField src_tmp(FGrid);
 for ( int i=0; i<JP.Nu; ++i ){
@@ -372,7 +374,8 @@ if (0)
 ComplexD rnd;
 RealD re;
 fillScalar(re,RNG5._gaussian[0],RNG5._generators[0]);
-std::cout << GridLogMessage << i <<" / "<< JP.Nm <<" rnd "<< rnd << std::endl;
+std::cout << i <<" / "<< JP.Nm <<" re "<< re << std::endl;
+// printf("%d / %d re %e\n",i,FGrid->_processor,re);
 src_tmp=re;
 pickCheckerboard(Odd,src[i],src_tmp);
 }
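
The if (0) fallback avoids the full 5d Gaussian fill by drawing one scalar per source and broadcasting it across the lattice, which is far cheaper when the lattice RNG dominates setup time. A scalar sketch of that fallback, with std::mt19937 in place of Grid's RNG5 and doubles in place of the FermionField sources (hypothetical stand-ins):

    #include <cstdio>
    #include <random>
    #include <vector>

    // Cheap-source fallback: one Gaussian draw per source vector, broadcast
    // to every site, instead of a per-site lattice RNG fill.
    int main() {
      std::mt19937 gen(5);                         // fixed seed, like the seeded RNG5
      std::normal_distribution<double> gaussian(0.0, 1.0);
      const int Nu = 4;
      std::vector<double> src(Nu);
      for (int i = 0; i < Nu; ++i) {
        double re = gaussian(gen);                 // fillScalar(re, gaussian, generator)
        std::printf("%d / %d re %e\n", i, Nu, re);
        src[i] = re;                               // src_tmp = re; broadcast fill
      }
    }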