Merge branch 'develop' of https://github.com/paboyle/Grid into develop
@@ -14,35 +14,62 @@ std::string filestem(const int l)
 int main (int argc, char ** argv)
 {
-#ifdef HAVE_LIME
   Grid_init(&argc,&argv);
 
-  int64_t threads = GridThread::GetThreads();
+  int64_t          threads = GridThread::GetThreads();
+  auto             mpi     = GridDefaultMpi();
+  std::vector<int> latt;
 
   MSG << "Grid is setup to use " << threads << " threads" << std::endl;
+  MSG << "MPI partition " << mpi << std::endl;
 
   MSG << SEP << std::endl;
-  MSG << "Benchmark Lime write" << std::endl;
+  MSG << "Benchmark std write" << std::endl;
   MSG << SEP << std::endl;
   for (int l = 4; l <= BENCH_IO_LMAX; l += 2)
   {
-    auto             mpi  = GridDefaultMpi();
-    std::vector<int> latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
+    latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
 
-    std::cout << "-- Local volume " << l << "^4" << std::endl;
-    writeBenchmark<LatticeFermion>(latt, filestem(l), limeWrite<LatticeFermion>);
+    MSG << "-- Local volume " << l << "^4" << std::endl;
+    writeBenchmark<LatticeFermion>(latt, filestem(l), stdWrite<LatticeFermion>);
   }
 
+  MSG << SEP << std::endl;
+  MSG << "Benchmark std read" << std::endl;
+  MSG << SEP << std::endl;
+  for (int l = 4; l <= BENCH_IO_LMAX; l += 2)
+  {
+    latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
+
+    MSG << "-- Local volume " << l << "^4" << std::endl;
+    readBenchmark<LatticeFermion>(latt, filestem(l), stdRead<LatticeFermion>);
+  }
+
+#ifdef HAVE_LIME
+  MSG << SEP << std::endl;
+  MSG << "Benchmark Grid C-Lime write" << std::endl;
+  MSG << SEP << std::endl;
+  for (int l = 4; l <= BENCH_IO_LMAX; l += 2)
+  {
+    latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
+
+    MSG << "-- Local volume " << l << "^4" << std::endl;
+    writeBenchmark<LatticeFermion>(latt, filestem(l), limeWrite<LatticeFermion>);
+  }
+
-  MSG << "Benchmark Lime read" << std::endl;
   MSG << SEP << std::endl;
+  MSG << "Benchmark Grid C-Lime read" << std::endl;
+  MSG << SEP << std::endl;
   for (int l = 4; l <= BENCH_IO_LMAX; l += 2)
   {
-    auto             mpi  = GridDefaultMpi();
-    std::vector<int> latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
+    latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
 
-    std::cout << "-- Local volume " << l << "^4" << std::endl;
+    MSG << "-- Local volume " << l << "^4" << std::endl;
     readBenchmark<LatticeFermion>(latt, filestem(l), limeRead<LatticeFermion>);
   }
+#endif
 
   Grid_finalize();
-#endif
 
   return EXIT_SUCCESS;
 }
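In the rewritten driver, l is the local lattice extent per dimension and the global lattice latt is that extent scaled by the MPI decomposition, so every rank holds an l^4 local volume regardless of the rank count. A minimal illustration of the arithmetic, assuming a hypothetical 2^4 rank layout (values not taken from the commit):

    std::vector<int> mpi  = {2, 2, 2, 2};  // hypothetical decomposition: 2 ranks per dimension
    int              l    = 8;             // local extent per dimension
    std::vector<int> latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};  // global lattice {16,16,16,16}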
@@ -14,13 +14,140 @@ using WriterFn = std::function<void(const std::string, Field &)> ;
 template <typename Field>
 using ReaderFn = std::function<void(Field &, const std::string)>;
 
+// AP 06/10/2020: Standard C version in case one is suspicious of the C++ API
+//
+// template <typename Field>
+// void stdWrite(const std::string filestem, Field &vec)
+// {
+//   std::string   rankStr = std::to_string(vec.Grid()->ThisRank());
+//   std::FILE     *file = std::fopen((filestem + "." + rankStr + ".bin").c_str(), "wb");
+//   size_t        size;
+//   uint32_t      crc;
+//   GridStopWatch ioWatch, crcWatch;
+//
+//   size = vec.Grid()->lSites()*sizeof(typename Field::scalar_object);
+//   autoView(vec_v, vec, CpuRead);
+//   crcWatch.Start();
+//   crc = GridChecksum::crc32(vec_v.cpu_ptr, size);
+//   std::fwrite(&crc, sizeof(uint32_t), 1, file);
+//   crcWatch.Stop();
+//   MSG << "Std I/O write: Data CRC32 " << std::hex << crc << std::dec << std::endl;
+//   ioWatch.Start();
+//   std::fwrite(vec_v.cpu_ptr, sizeof(typename Field::scalar_object), vec.Grid()->lSites(), file);
+//   ioWatch.Stop();
+//   std::fclose(file);
+//   size *= vec.Grid()->ProcessorCount();
+//   MSG << "Std I/O write: Wrote " << size << " bytes in " << ioWatch.Elapsed()
+//       << ", performance " << size/1024./1024./(ioWatch.useconds()/1.e6)
+//       << " MB/s" << std::endl;
+//   MSG << "Std I/O write: checksum overhead " << crcWatch.Elapsed() << std::endl;
+// }
+//
+// template <typename Field>
+// void stdRead(Field &vec, const std::string filestem)
+// {
+//   std::string   rankStr = std::to_string(vec.Grid()->ThisRank());
+//   std::FILE     *file = std::fopen((filestem + "." + rankStr + ".bin").c_str(), "rb");
+//   size_t        size;
+//   uint32_t      crcRead, crcData;
+//   GridStopWatch ioWatch, crcWatch;
+//
+//   size = vec.Grid()->lSites()*sizeof(typename Field::scalar_object);
+//   crcWatch.Start();
+//   std::fread(&crcRead, sizeof(uint32_t), 1, file);
+//   crcWatch.Stop();
+//   {
+//     autoView(vec_v, vec, CpuWrite);
+//     ioWatch.Start();
+//     std::fread(vec_v.cpu_ptr, sizeof(typename Field::scalar_object), vec.Grid()->lSites(), file);
+//     ioWatch.Stop();
+//     std::fclose(file);
+//   }
+//   {
+//     autoView(vec_v, vec, CpuRead);
+//     crcWatch.Start();
+//     crcData = GridChecksum::crc32(vec_v.cpu_ptr, size);
+//     crcWatch.Stop();
+//   }
+//   MSG << "Std I/O read: Data CRC32 " << std::hex << crcData << std::dec << std::endl;
+//   assert(crcData == crcRead);
+//   size *= vec.Grid()->ProcessorCount();
+//   MSG << "Std I/O read: Read " << size << " bytes in " << ioWatch.Elapsed()
+//       << ", performance " << size/1024./1024./(ioWatch.useconds()/1.e6)
+//       << " MB/s" << std::endl;
+//   MSG << "Std I/O read: checksum overhead " << crcWatch.Elapsed() << std::endl;
+// }
+
+template <typename Field>
+void stdWrite(const std::string filestem, Field &vec)
+{
+  std::string   rankStr = std::to_string(vec.Grid()->ThisRank());
+  std::ofstream file(filestem + "." + rankStr + ".bin", std::ios::out | std::ios::binary);
+  size_t        size, sizec;
+  uint32_t      crc;
+  GridStopWatch ioWatch, crcWatch;
+
+  size  = vec.Grid()->lSites()*sizeof(typename Field::scalar_object);
+  sizec = size/sizeof(char); // just in case of...
+  autoView(vec_v, vec, CpuRead);
+  crcWatch.Start();
+  crc = GridChecksum::crc32(vec_v.cpu_ptr, size);
+  file.write(reinterpret_cast<char *>(&crc), sizeof(uint32_t)/sizeof(char));
+  crcWatch.Stop();
+  MSG << "Std I/O write: Data CRC32 " << std::hex << crc << std::dec << std::endl;
+  ioWatch.Start();
+  file.write(reinterpret_cast<char *>(vec_v.cpu_ptr), sizec);
+  file.flush();
+  ioWatch.Stop();
+  size *= vec.Grid()->ProcessorCount();
+  MSG << "Std I/O write: Wrote " << size << " bytes in " << ioWatch.Elapsed()
+      << ", " << size/1024./1024./(ioWatch.useconds()/1.e6)
+      << " MB/s" << std::endl;
+  MSG << "Std I/O write: checksum overhead " << crcWatch.Elapsed() << std::endl;
+}
+
+template <typename Field>
+void stdRead(Field &vec, const std::string filestem)
+{
+  std::string   rankStr = std::to_string(vec.Grid()->ThisRank());
+  std::ifstream file(filestem + "." + rankStr + ".bin", std::ios::in | std::ios::binary);
+  size_t        size, sizec;
+  uint32_t      crcRead, crcData;
+  GridStopWatch ioWatch, crcWatch;
+
+  size  = vec.Grid()->lSites()*sizeof(typename Field::scalar_object);
+  sizec = size/sizeof(char); // just in case of...
+  crcWatch.Start();
+  file.read(reinterpret_cast<char *>(&crcRead), sizeof(uint32_t)/sizeof(char));
+  crcWatch.Stop();
+  {
+    autoView(vec_v, vec, CpuWrite);
+    ioWatch.Start();
+    file.read(reinterpret_cast<char *>(vec_v.cpu_ptr), sizec);
+    ioWatch.Stop();
+  }
+  {
+    autoView(vec_v, vec, CpuRead);
+    crcWatch.Start();
+    crcData = GridChecksum::crc32(vec_v.cpu_ptr, size);
+    crcWatch.Stop();
+  }
+  MSG << "Std I/O read: Data CRC32 " << std::hex << crcData << std::dec << std::endl;
+  assert(crcData == crcRead);
+  size *= vec.Grid()->ProcessorCount();
+  MSG << "Std I/O read: Read " << size << " bytes in " << ioWatch.Elapsed()
+      << ", " << size/1024./1024./(ioWatch.useconds()/1.e6)
+      << " MB/s" << std::endl;
+  MSG << "Std I/O read: checksum overhead " << crcWatch.Elapsed() << std::endl;
+}
+
 template <typename Field>
 void limeWrite(const std::string filestem, Field &vec)
 {
   emptyUserRecord   record;
   ScidacWriter binWriter(vec.Grid()->IsBoss());
 
-  binWriter.open(filestem + ".bin");
+  binWriter.open(filestem + ".lime.bin");
   binWriter.writeScidacFieldRecord(vec, record);
   binWriter.close();
 }
@@ -31,7 +158,7 @@ void limeRead(Field &vec, const std::string filestem)
   emptyUserRecord   record;
   ScidacReader binReader;
 
-  binReader.open(filestem + ".bin");
+  binReader.open(filestem + ".lime.bin");
   binReader.readScidacFieldRecord(vec, record);
   binReader.close();
 }
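stdWrite and stdRead above fix a simple per-rank file layout: one uint32_t CRC32 of the payload, immediately followed by the raw field data. For inspecting the resulting .bin files outside Grid, here is a self-contained sketch of the same layout; it assumes GridChecksum::crc32 is the standard CRC32 and stands zlib's crc32 in for it (file name and buffer are illustrative, not from the commit; build with -lz):

    #include <cassert>
    #include <cstdint>
    #include <fstream>
    #include <vector>
    #include <zlib.h> // assumption: zlib crc32 as a stand-in for GridChecksum::crc32

    int main(void)
    {
      std::vector<char> payload(1024, 42); // stand-in for the raw field data

      // Write: 4-byte CRC32 header, then the payload (mirrors stdWrite).
      uint32_t crc = crc32(crc32(0L, Z_NULL, 0),
                           reinterpret_cast<const Bytef *>(payload.data()), payload.size());
      std::ofstream out("example.bin", std::ios::out | std::ios::binary);
      out.write(reinterpret_cast<char *>(&crc), sizeof(uint32_t));
      out.write(payload.data(), payload.size());
      out.close();

      // Read back and verify the checksum (mirrors stdRead).
      std::ifstream in("example.bin", std::ios::in | std::ios::binary);
      uint32_t crcRead;
      in.read(reinterpret_cast<char *>(&crcRead), sizeof(uint32_t));
      std::vector<char> check(payload.size());
      in.read(check.data(), check.size());
      uint32_t crcData = crc32(crc32(0L, Z_NULL, 0),
                               reinterpret_cast<const Bytef *>(check.data()), check.size());
      assert(crcData == crcRead);
      return 0;
    }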
@@ -8,7 +8,6 @@ using namespace Grid;
 
 int main (int argc, char ** argv)
 {
-#ifdef HAVE_LIME
   std::vector<std::string> dir;
   unsigned int             Ls;
   bool                     rb;
@@ -34,46 +33,71 @@ int main (int argc, char ** argv)
   }
   Grid_init(&argc,&argv);
 
 
   int64_t threads = GridThread::GetThreads();
+  auto    mpi     = GridDefaultMpi();
 
   MSG << "Grid is setup to use " << threads << " threads" << std::endl;
-  MSG << SEP << std::endl;
-  MSG << "Benchmark double precision Lime write" << std::endl;
-  MSG << SEP << std::endl;
-  for (auto &d: dir)
-  {
-    MSG << "-- Directory " << d << std::endl;
-    writeBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", limeWrite<LatticeFermion>, Ls, rb);
-  }
+  MSG << "MPI partition " << mpi << std::endl;
+
   MSG << SEP << std::endl;
-  MSG << "Benchmark double precision Lime read" << std::endl;
+  MSG << "Benchmark Grid std write" << std::endl;
   MSG << SEP << std::endl;
   for (auto &d: dir)
   {
     MSG << "-- Directory " << d << std::endl;
-    readBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", limeRead<LatticeFermion>, Ls, rb);
+    writeBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", 
+                                   stdWrite<LatticeFermion>, Ls, rb);
   }
+  MSG << SEP << std::endl;
+  MSG << "Benchmark Grid std read" << std::endl;
+  MSG << SEP << std::endl;
+  for (auto &d: dir)
+  {
+    MSG << "-- Directory " << d << std::endl;
+    readBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", 
+                                  stdRead<LatticeFermion>, Ls, rb);
+  }
 
+#ifdef HAVE_LIME
   MSG << SEP << std::endl;
-  MSG << "Benchmark single precision Lime write" << std::endl;
+  MSG << "Benchmark Grid C-Lime write" << std::endl;
   MSG << SEP << std::endl;
   for (auto &d: dir)
   {
     MSG << "-- Directory " << d << std::endl;
-    writeBenchmark<LatticeFermionF>(GridDefaultLatt(), d + "/ioBench", limeWrite<LatticeFermionF>, Ls, rb);
+    writeBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", 
+                                   limeWrite<LatticeFermion>, Ls, rb);
   }
+  MSG << SEP << std::endl;
+  MSG << "Benchmark Grid C-Lime read" << std::endl;
+  MSG << SEP << std::endl;
+  for (auto &d: dir)
+  {
+    MSG << "-- Directory " << d << std::endl;
+    readBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", 
+                                  limeRead<LatticeFermion>, Ls, rb);
+  }
+#endif
+
-  MSG << SEP << std::endl;
-  MSG << "Benchmark single precision Lime read" << std::endl;
-  MSG << SEP << std::endl;
-  for (auto &d: dir)
-  {
-    MSG << "-- Directory " << d << std::endl;
-    readBenchmark<LatticeFermionF>(GridDefaultLatt(), d + "/ioBench", limeRead<LatticeFermionF>, Ls, rb);
-  }
+  // MSG << SEP << std::endl;
+  // MSG << "Benchmark single precision Lime write" << std::endl;
+  // MSG << SEP << std::endl;
+  // for (auto &d: dir)
+  // {
+  //   MSG << "-- Directory " << d << std::endl;
+  //   writeBenchmark<LatticeFermionF>(GridDefaultLatt(), d + "/ioBench", limeWrite<LatticeFermionF>, Ls, rb);
+  // }
+
+  // MSG << SEP << std::endl;
+  // MSG << "Benchmark single precision Lime read" << std::endl;
+  // MSG << SEP << std::endl;
+  // for (auto &d: dir)
+  // {
+  //   MSG << "-- Directory " << d << std::endl;
+  //   readBenchmark<LatticeFermionF>(GridDefaultLatt(), d + "/ioBench", limeRead<LatticeFermionF>, Ls, rb);
+  // }
 
   Grid_finalize();
-#endif
 
   return EXIT_SUCCESS;
 }
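The "MB/s" lines these benchmarks print, and that the CSV script below scrapes, come from the std I/O path's expression size/1024./1024./(ioWatch.useconds()/1.e6): bytes summed over ranks, converted to MiB, divided by elapsed seconds. A unit sanity check with illustrative numbers (not output from the commit):

    #include <cassert>

    int main(void)
    {
      double size = 512. * 1024. * 1024.;         // 512 MiB transferred, summed over ranks
      double usec = 5.e5;                         // ioWatch.useconds(): 0.5 s elapsed
      double rate = size/1024./1024./(usec/1.e6); // the benchmarks' formula
      assert(rate == 1024.);                      // printed as "1024 MB/s" (strictly MiB/s)
      return 0;
    }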
benchmarks/benchmark-io-csv.sh (new executable file, 76 lines)
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+
+awkscript='
+BEGIN{
+  i = 0;
+  print "local L,std read (MB/s),std write (MB/s),Grid Lime read (MB/s),Grid Lime write (MB/s)"
+}
+
+/Benchmark std write/{
+  i    = 0;
+  mode = "stdWrite";
+}
+
+/Benchmark std read/{
+  i    = 0;
+  mode = "stdRead"
+}
+
+/Benchmark Grid C-Lime write/{
+  i    = 0;
+  mode = "gridWrite";
+}
+
+/Benchmark Grid C-Lime read/{
+  i    = 0;
+  mode = "gridRead";
+}
+
+/Local volume/{
+  match($0, "[0-9]+\\^4");
+  l[i] = substr($0, RSTART, RLENGTH-2);
+}
+
+/MB\/s/{
+  match($0, "[0-9.eE]+ MB/s");
+  p = substr($0, RSTART, RLENGTH-5);
+  if (mode == "stdWrite")
+  {
+    sw[i] = p;
+  }
+  else if (mode == "stdRead")
+  {
+    sr[i] = p;
+  }
+  else if (mode == "gridWrite")
+  {
+    gw[i] = p;
+  }
+  else if (mode == "gridRead")
+  {
+    gr[i] = p;
+  }
+  i++;
+}
+
+END{
+  s = 0
+  for (a in l)
+  {
+    s++;
+  }
+  for (j = 0; j < s; j++)
+  {
+    printf("%s,%s,%s,%s,%s\n", l[j], sr[j], sw[j], gr[j], gw[j]);
+  }
+  printf("\n");
+}
+'
+
+if (( $# != 1 )); then
+    echo "usage: `basename $0` <log file>" 1>&2
+    exit 1
+fi
+LOG=$1
+
+awk "${awkscript}" ${LOG}