Mirror of https://github.com/paboyle/Grid.git (synced 2024-11-10 07:55:35 +00:00)

Commit 3bfd1f13e6: I/O improvements
Parent: 092dcd4e04
@@ -55,8 +55,8 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
std::cout<<GridLogMessage << " L "<<"\t\t"<<"bytes"<<"\t\t\t"<<"GB/s"<<"\t\t"<<"Gflop/s"<<"\t\t seconds"<<std::endl;
std::cout<<GridLogMessage << "----------------------------------------------------------"<<std::endl;
uint64_t lmax=44;
#define NLOOP (1*lmax*lmax*lmax*lmax/vol)
uint64_t lmax=64;
#define NLOOP (100*lmax*lmax*lmax*lmax/vol)
for(int lat=4;lat<=lmax;lat+=4){

std::vector<int> latt_size ({lat*mpi_layout[0],lat*mpi_layout[1],lat*mpi_layout[2],lat*mpi_layout[3]});
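Note on the NLOOP change above: the iteration count scales inversely with the lattice volume, so every lattice size in the sweep streams a comparable amount of data. A rough worked example, illustrative only and taking vol to be lat^4 on a single rank:

// Illustrative arithmetic, not part of the patch:
// lat = 4  : NLOOP = 100*64^4/4^4  = 6,553,600 iterations
// lat = 64 : NLOOP = 100*64^4/64^4 = 100 iterations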
@@ -35,9 +35,9 @@ using namespace Grid::QCD;
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
#define LMAX (32)
#define LMAX (64)

int Nloop=200;
int Nloop=20;

std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
@@ -27,7 +27,7 @@ AX_GXX_VERSION
AC_DEFINE_UNQUOTED([GXX_VERSION],["$GXX_VERSION"],
[version of g++ that will compile the code])

CXXFLAGS="-O3 $CXXFLAGS"
CXXFLAGS="-g $CXXFLAGS"


############### Checks for typedefs, structures, and compiler characteristics
@@ -65,7 +65,7 @@ void TLoad::setup(void)
// execution ///////////////////////////////////////////////////////////////////
void TLoad::execute(void)
{
NerscField header;
FieldMetaData header;
std::string fileName = par().file + "."
+ std::to_string(env().getTrajectory());

@@ -74,5 +74,5 @@ void TLoad::execute(void)
LatticeGaugeField &U = *env().createLattice<LatticeGaugeField>(getName());
NerscIO::readConfiguration(U, header, fileName);
LOG(Message) << "NERSC header:" << std::endl;
dump_nersc_header(header, LOG(Message));
dump_meta_data(header, LOG(Message));
}
@@ -42,6 +42,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#include <Grid/GridQCDcore.h>
#include <Grid/qcd/action/Action.h>
#include <Grid/qcd/smearing/Smearing.h>
#include <Grid/parallelIO/MetaData.h>
#include <Grid/qcd/hmc/HMC_aggregate.h>

#endif
@@ -18,6 +18,7 @@
#include <ctime>
#include <sys/time.h>
#include <chrono>
#include <zlib.h>

///////////////////
// Grid config
@@ -50,7 +50,6 @@ public:

GridBase(const std::vector<int> & processor_grid) : CartesianCommunicator(processor_grid) {};


// Physics Grid information.
std::vector<int> _simd_layout;// Which dimensions get relayed out over simd lanes.
std::vector<int> _fdimensions;// (full) Global dimensions of array prior to cb removal

@@ -63,13 +62,12 @@ public:
int _isites;
int _fsites; // _isites*_osites = product(dimensions).
int _gsites;
std::vector<int> _slice_block; // subslice information
std::vector<int> _slice_block;// subslice information
std::vector<int> _slice_stride;
std::vector<int> _slice_nblock;

// Might need these at some point
// std::vector<int> _lstart; // local start of array in gcoors. _processor_coor[d]*_ldimensions[d]
// std::vector<int> _lend; // local end of array in gcoors _processor_coor[d]*_ldimensions[d]+_ldimensions_[d]-1
std::vector<int> _lstart; // local start of array in gcoors _processor_coor[d]*_ldimensions[d]
std::vector<int> _lend ; // local end of array in gcoors _processor_coor[d]*_ldimensions[d]+_ldimensions_[d]-1

public:
@@ -176,6 +174,7 @@ public:
inline int gSites(void) const { return _isites*_osites*_Nprocessors; };
inline int Nd (void) const { return _ndimension;};

inline const std::vector<int> LocalStarts(void) { return _lstart; };
inline const std::vector<int> &FullDimensions(void) { return _fdimensions;};
inline const std::vector<int> &GlobalDimensions(void) { return _gdimensions;};
inline const std::vector<int> &LocalDimensions(void) { return _ldimensions;};
@@ -76,6 +76,8 @@ public:
_ldimensions.resize(_ndimension);
_rdimensions.resize(_ndimension);
_simd_layout.resize(_ndimension);
_lstart.resize(_ndimension);
_lend.resize(_ndimension);

_ostride.resize(_ndimension);
_istride.resize(_ndimension);

@@ -94,6 +96,8 @@ public:
// Use a reduced simd grid
_ldimensions[d]= _gdimensions[d]/_processors[d]; //local dimensions
_rdimensions[d]= _ldimensions[d]/_simd_layout[d]; //overdecomposition
_lstart[d] = _processor_coor[d]*_ldimensions[d];
_lend[d] = _processor_coor[d]*_ldimensions[d]+_ldimensions[d]-1;
_osites *= _rdimensions[d];
_isites *= _simd_layout[d];
@@ -151,6 +151,8 @@ public:
_ldimensions.resize(_ndimension);
_rdimensions.resize(_ndimension);
_simd_layout.resize(_ndimension);
_lstart.resize(_ndimension);
_lend.resize(_ndimension);

_ostride.resize(_ndimension);
_istride.resize(_ndimension);

@@ -169,6 +171,8 @@ public:
_gdimensions[d] = _gdimensions[d]/2; // Remove a checkerboard
}
_ldimensions[d] = _gdimensions[d]/_processors[d];
_lstart[d] = _processor_coor[d]*_ldimensions[d];
_lend[d] = _processor_coor[d]*_ldimensions[d]+_ldimensions[d]-1;

// Use a reduced simd grid
_simd_layout[d] = simd_layout[d];
@@ -177,6 +177,8 @@ class CartesianCommunicator {
void GlobalSumVector(ComplexF *c,int N);
void GlobalSum(ComplexD &c);
void GlobalSumVector(ComplexD *c,int N);
void GlobalXOR(uint32_t &);
void GlobalXOR(uint64_t &);

template<class obj> void GlobalSum(obj &o){
typedef typename obj::scalar_type scalar_type;
@@ -83,6 +83,14 @@ void CartesianCommunicator::GlobalSum(uint64_t &u){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalXOR(uint32_t &u){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_BXOR,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalXOR(uint64_t &u){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(float &f){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator);
assert(ierr==0);
@@ -510,6 +510,14 @@ void CartesianCommunicator::GlobalSum(uint64_t &u){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalXOR(uint32_t &u){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_BXOR,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalXOR(uint64_t &u){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(float &f){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator);
assert(ierr==0);
@@ -59,6 +59,8 @@ void CartesianCommunicator::GlobalSum(double &){}
void CartesianCommunicator::GlobalSum(uint32_t &){}
void CartesianCommunicator::GlobalSum(uint64_t &){}
void CartesianCommunicator::GlobalSumVector(double *,int N){}
void CartesianCommunicator::GlobalXOR(uint32_t &){}
void CartesianCommunicator::GlobalXOR(uint64_t &){}

void CartesianCommunicator::SendRecvPacket(void *xmit,
void *recv,
@@ -125,57 +125,94 @@ class BinaryIO {
/////////////////////////////////////////////////////////////////////////////
// more byte manipulation helpers
/////////////////////////////////////////////////////////////////////////////
static inline void Uint32Checksum(uint32_t *buf,uint64_t buf_size_bytes,uint32_t &csum)

template<class vobj> static inline void Uint32Checksum(Lattice<vobj> &lat,
uint32_t &nersc_csum,
uint32_t &scidac_csuma,
uint32_t &scidac_csumb)

{
typedef typename vobj::scalar_object sobj;

GridBase *grid = lat._grid;
int lsites = grid->lSites();

std::vector<sobj> scalardata(lsites);
unvectorizeToLexOrdArray(scalardata,lat);

Uint32Checksum(grid,scalardata,nersc_csum,scidac_csuma,scidac_csumb);
}

template<class fobj>
static inline void Uint32Checksum(GridBase *grid,
std::vector<fobj> &fbuf,
uint32_t &nersc_csum,
uint32_t &scidac_csuma,
uint32_t &scidac_csumb)
{
const uint64_t size32 = sizeof(fobj)/sizeof(uint32_t);


int nd = grid->_ndimension;

uint64_t lsites =grid->lSites();
std::vector<int> local_vol =grid->LocalDimensions();
std::vector<int> local_start =grid->LocalStarts();
std::vector<int> global_vol =grid->FullDimensions();

#pragma omp parallel
{
uint32_t csum_thr=0;
uint64_t count = buf_size_bytes/sizeof(uint32_t);
std::vector<int> coor(nd);
uint32_t nersc_csum_thr=0;
uint32_t scidac_csuma_thr=0;
uint32_t scidac_csumb_thr=0;
uint32_t site_crc=0;
uint32_t zcrc = crc32(0L, Z_NULL, 0);

#pragma omp for
for(uint64_t i=0;i<count;i++){
csum_thr=csum_thr+buf[i];
for(uint64_t local_site=0;local_site<lsites;local_site++){

uint32_t * site_buf = (uint32_t *)&fbuf[local_site];

for(uint64_t j=0;j<size32;j++){
nersc_csum_thr=nersc_csum_thr+site_buf[j];
}

/*
* Scidac csum is rather more heavyweight
*/
int global_site;

Lexicographic::CoorFromIndex(coor,local_site,local_vol);

for(int d=0;d<nd;d++)
coor[d] = coor[d]+local_start[d];

Lexicographic::IndexFromCoor(coor,global_site,global_vol);

uint32_t gsite29 = global_site%29;
uint32_t gsite31 = global_site%31;

site_crc = crc32(zcrc,(unsigned char *)site_buf,sizeof(fobj));

scidac_csuma_thr ^= site_crc<<gsite29 | site_crc>>(32-gsite29);
scidac_csumb_thr ^= site_crc<<gsite31 | site_crc>>(32-gsite31);
}

#pragma omp critical
csum = csum + csum_thr;
{
nersc_csum += nersc_csum_thr;
scidac_csuma^= scidac_csuma_thr;
scidac_csumb^= scidac_csumb_thr;
}
}
}
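For reference, a minimal serial sketch of what the threaded region above computes. The function name, loop structure and helper lambda are illustrative only, not part of the patch; it assumes fbuf holds one fobj per local site in lexicographic order, and uses zlib's crc32 as above.

// Illustrative serial version of the NERSC and SciDAC checksums (needs <zlib.h>, <vector>, <cstdint>).
template<class fobj>
static void checksums_sketch(const std::vector<fobj> &fbuf,
                             const std::vector<int> &local_vol,   // local extent per dimension
                             const std::vector<int> &local_start, // this rank's global offset per dimension
                             const std::vector<int> &global_vol,  // global extent per dimension
                             uint32_t &nersc_csum, uint32_t &scidac_csuma, uint32_t &scidac_csumb)
{
  const int nd = local_vol.size();
  const uint64_t size32 = sizeof(fobj)/sizeof(uint32_t);
  nersc_csum = scidac_csuma = scidac_csumb = 0;
  auto rotl = [](uint32_t x, uint32_t r){ return r ? ((x << r) | (x >> (32 - r))) : x; };

  for(uint64_t site=0; site<fbuf.size(); site++){
    const uint32_t *w = (const uint32_t *)&fbuf[site];

    // NERSC checksum: plain 32-bit sum of every word of the site payload.
    for(uint64_t j=0;j<size32;j++) nersc_csum += w[j];

    // Global lexicographic site index (CoorFromIndex/IndexFromCoor done by hand).
    uint64_t rem = site, gsite = 0, stride = 1;
    for(int d=0; d<nd; d++){
      uint64_t coord = rem % local_vol[d] + local_start[d];
      rem   /= local_vol[d];
      gsite += coord * stride;
      stride *= global_vol[d];
    }

    // SciDAC checksum: CRC32 of the site data, circularly rotated by gsite mod 29 / mod 31,
    // then XOR-reduced across sites (and later across ranks via GlobalXOR).
    uint32_t crc = crc32(crc32(0L, Z_NULL, 0), (const unsigned char *)w, sizeof(fobj));
    scidac_csuma ^= rotl(crc, gsite % 29);
    scidac_csumb ^= rotl(crc, gsite % 31);
  }
}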

// Network is big endian
static inline void htobe32_v(void *file_object,uint64_t bytes,uint32_t &csum){
Uint32Checksum((uint32_t *)file_object,bytes,csum);
htobe32_v(file_object,bytes);
}
static inline void htobe64_v(void *file_object,uint64_t bytes,uint32_t &csum){
Uint32Checksum((uint32_t *)file_object,bytes,csum);
htobe64_v(file_object,bytes);
}
static inline void htole32_v(void *file_object,uint64_t bytes,uint32_t &csum){
Uint32Checksum((uint32_t *)file_object,bytes,csum);
htole32_v(file_object,bytes);
}
static inline void htole64_v(void *file_object,uint64_t bytes,uint32_t &csum){
Uint32Checksum((uint32_t *)file_object,bytes,csum);
htole64_v(file_object,bytes);
}
static inline void be32toh_v(void *file_object,uint64_t bytes,uint32_t &csum){
be32toh_v(file_object,bytes);
Uint32Checksum((uint32_t *)file_object,bytes,csum);
}
static inline void be64toh_v(void *file_object,uint64_t bytes,uint32_t &csum){
be64toh_v(file_object,bytes);
Uint32Checksum((uint32_t *)file_object,bytes,csum);
}
static inline void le32toh_v(void *file_object,uint64_t bytes,uint32_t &csum){
le32toh_v(file_object,bytes);
Uint32Checksum((uint32_t *)file_object,bytes,csum);
}
static inline void le64toh_v(void *file_object,uint64_t bytes,uint32_t &csum){
le64toh_v(file_object,bytes);
Uint32Checksum((uint32_t *)file_object,bytes,csum);
}
static inline void htobe32_v(void *file_object,uint64_t bytes){ be32toh_v(file_object,bytes);}
static inline void htobe64_v(void *file_object,uint64_t bytes){ be64toh_v(file_object,bytes);}
static inline void htole32_v(void *file_object,uint64_t bytes){ le32toh_v(file_object,bytes);}
static inline void htole64_v(void *file_object,uint64_t bytes){ le64toh_v(file_object,bytes);}
static inline void htobe32_v(void *file_object,uint32_t bytes){ be32toh_v(file_object,bytes);}
static inline void htobe64_v(void *file_object,uint32_t bytes){ be64toh_v(file_object,bytes);}
static inline void htole32_v(void *file_object,uint32_t bytes){ le32toh_v(file_object,bytes);}
static inline void htole64_v(void *file_object,uint32_t bytes){ le64toh_v(file_object,bytes);}

static inline void be32toh_v(void *file_object,uint64_t bytes)
{
@ -199,6 +236,7 @@ class BinaryIO {
|
||||
fp[i] = ntohl(f);
|
||||
}
|
||||
}
|
||||
|
||||
// BE is same as network
|
||||
static inline void be64toh_v(void *file_object,uint64_t bytes)
|
||||
{
|
||||
@@ -238,18 +276,23 @@ class BinaryIO {
static const int BINARYIO_WRITE = 0x01;

template<class word,class fobj>
static inline uint32_t IOobject(word w,
static inline void IOobject(word w,
GridBase *grid,
std::vector<fobj> &iodata,
std::string file,
int offset,
const std::string &format, int control)
const std::string &format, int control,
uint32_t &nersc_csum,
uint32_t &scidac_csuma,
uint32_t &scidac_csumb)
{
grid->Barrier();
GridStopWatch timer;
GridStopWatch bstimer;

uint32_t csum=0;
nersc_csum=0;
scidac_csuma=0;
scidac_csumb=0;

int ndim = grid->Dimensions();
int nrank = grid->ProcessorCount();
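With this change IOobject no longer returns a single checksum; the three checksums come back through reference arguments. A call-site sketch, mirroring the calls that appear later in this patch:

// Sketch of the new calling convention (inside BinaryIO; externally it would be BinaryIO::IOobject):
uint32_t nersc_csum, scidac_csuma, scidac_csumb;
IOobject(w, grid, iodata, file, offset, format,
         BINARYIO_READ | BINARYIO_LEXICOGRAPHIC,
         nersc_csum, scidac_csuma, scidac_csumb);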
@ -359,20 +402,22 @@ class BinaryIO {
|
||||
grid->Barrier();
|
||||
|
||||
bstimer.Start();
|
||||
if (ieee32big) be32toh_v((void *)&iodata[0], sizeof(fobj)*iodata.size(),csum);
|
||||
if (ieee32) le32toh_v((void *)&iodata[0], sizeof(fobj)*iodata.size(),csum);
|
||||
if (ieee64big) be64toh_v((void *)&iodata[0], sizeof(fobj)*iodata.size(),csum);
|
||||
if (ieee64) le64toh_v((void *)&iodata[0], sizeof(fobj)*iodata.size(),csum);
|
||||
if (ieee32big) be32toh_v((void *)&iodata[0], sizeof(fobj)*iodata.size());
|
||||
if (ieee32) le32toh_v((void *)&iodata[0], sizeof(fobj)*iodata.size());
|
||||
if (ieee64big) be64toh_v((void *)&iodata[0], sizeof(fobj)*iodata.size());
|
||||
if (ieee64) le64toh_v((void *)&iodata[0], sizeof(fobj)*iodata.size());
|
||||
Uint32Checksum(grid,iodata,nersc_csum,scidac_csuma,scidac_csumb);
|
||||
bstimer.Stop();
|
||||
}
|
||||
|
||||
if ( control & BINARYIO_WRITE ) {
|
||||
|
||||
bstimer.Start();
|
||||
if (ieee32big) htobe32_v((void *)&iodata[0], sizeof(fobj)*iodata.size(),csum);
|
||||
if (ieee32) htole32_v((void *)&iodata[0], sizeof(fobj)*iodata.size(),csum);
|
||||
if (ieee64big) htobe64_v((void *)&iodata[0], sizeof(fobj)*iodata.size(),csum);
|
||||
if (ieee64) htole64_v((void *)&iodata[0], sizeof(fobj)*iodata.size(),csum);
|
||||
Uint32Checksum(grid,iodata,nersc_csum,scidac_csuma,scidac_csumb);
|
||||
if (ieee32big) htobe32_v((void *)&iodata[0], sizeof(fobj)*iodata.size());
|
||||
if (ieee32) htole32_v((void *)&iodata[0], sizeof(fobj)*iodata.size());
|
||||
if (ieee64big) htobe64_v((void *)&iodata[0], sizeof(fobj)*iodata.size());
|
||||
if (ieee64) htole64_v((void *)&iodata[0], sizeof(fobj)*iodata.size());
|
||||
bstimer.Stop();
|
||||
|
||||
grid->Barrier();
|
||||
@ -418,17 +463,27 @@ class BinaryIO {
|
||||
// Safety check
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
grid->Barrier();
|
||||
grid->GlobalSum(csum);
|
||||
grid->GlobalSum(nersc_csum);
|
||||
grid->GlobalXOR(scidac_csuma);
|
||||
grid->GlobalXOR(scidac_csumb);
|
||||
grid->Barrier();
|
||||
|
||||
return csum;
|
||||
// std::cout << "Binary IO NERSC checksum 0x"<<std::hex<<nersc_csum <<std::dec<<std::endl;
|
||||
// std::cout << "Binary IO SCIDAC checksuma 0x"<<std::hex<<scidac_csuma<<std::dec<<std::endl;
|
||||
// std::cout << "Binary IO SCIDAC checksumb 0x"<<std::hex<<scidac_csumb<<std::dec<<std::endl;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////////
|
||||
// Read a Lattice of object
|
||||
//////////////////////////////////////////////////////////////////////////////////////
|
||||
template<class vobj,class fobj,class munger>
|
||||
static inline uint32_t readLatticeObject(Lattice<vobj> &Umu,std::string file,munger munge,int offset,const std::string &format)
|
||||
static inline void readLatticeObject(Lattice<vobj> &Umu,
|
||||
std::string file,
|
||||
munger munge,
|
||||
int offset,
|
||||
const std::string &format,
|
||||
uint32_t &nersc_csum,
|
||||
uint32_t &scidac_csuma,
|
||||
uint32_t &scidac_csumb)
|
||||
{
|
||||
typedef typename vobj::scalar_object sobj;
|
||||
typedef typename vobj::Realified::scalar_type word; word w=0;
|
||||
@ -439,7 +494,8 @@ class BinaryIO {
|
||||
std::vector<sobj> scalardata(lsites);
|
||||
std::vector<fobj> iodata(lsites); // Munge, checksum, byte order in here
|
||||
|
||||
uint32_t csum= IOobject(w,grid,iodata,file,offset,format,BINARYIO_READ|BINARYIO_LEXICOGRAPHIC);
|
||||
IOobject(w,grid,iodata,file,offset,format,BINARYIO_READ|BINARYIO_LEXICOGRAPHIC,
|
||||
nersc_csum,scidac_csuma,scidac_csumb);
|
||||
|
||||
GridStopWatch timer;
|
||||
timer.Start();
|
||||
@ -451,15 +507,20 @@ class BinaryIO {
|
||||
|
||||
timer.Stop();
|
||||
std::cout<<GridLogMessage<<"readLatticeObject: vectorize overhead "<<timer.Elapsed() <<std::endl;
|
||||
|
||||
return csum;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////////
|
||||
// Write a Lattice of object
|
||||
//////////////////////////////////////////////////////////////////////////////////////
|
||||
template<class vobj,class fobj,class munger>
|
||||
static inline uint32_t writeLatticeObject(Lattice<vobj> &Umu,std::string file,munger munge,int offset,const std::string &format)
|
||||
static inline void writeLatticeObject(Lattice<vobj> &Umu,
|
||||
std::string file,
|
||||
munger munge,
|
||||
int offset,
|
||||
const std::string &format,
|
||||
uint32_t &nersc_csum,
|
||||
uint32_t &scidac_csuma,
|
||||
uint32_t &scidac_csumb)
|
||||
{
|
||||
typedef typename vobj::scalar_object sobj;
|
||||
typedef typename vobj::Realified::scalar_type word; word w=0;
|
||||
@ -480,36 +541,45 @@ class BinaryIO {
|
||||
grid->Barrier();
|
||||
timer.Stop();
|
||||
|
||||
uint32_t csum= IOobject(w,grid,iodata,file,offset,format,BINARYIO_WRITE|BINARYIO_LEXICOGRAPHIC);
|
||||
IOobject(w,grid,iodata,file,offset,format,BINARYIO_WRITE|BINARYIO_LEXICOGRAPHIC,
|
||||
nersc_csum,scidac_csuma,scidac_csumb);
|
||||
|
||||
std::cout<<GridLogMessage<<"writeLatticeObject: unvectorize overhead "<<timer.Elapsed() <<std::endl;
|
||||
|
||||
return csum;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////////
|
||||
// Read a RNG; use IOobject and lexico map to an array of state
|
||||
//////////////////////////////////////////////////////////////////////////////////////
|
||||
static inline uint32_t readRNG(GridSerialRNG &serial,GridParallelRNG ¶llel,std::string file,int offset)
|
||||
static inline void readRNG(GridSerialRNG &serial,
|
||||
GridParallelRNG ¶llel,
|
||||
std::string file,
|
||||
int offset,
|
||||
uint32_t &nersc_csum,
|
||||
uint32_t &scidac_csuma,
|
||||
uint32_t &scidac_csumb)
|
||||
{
|
||||
typedef typename GridSerialRNG::RngStateType RngStateType;
|
||||
const int RngStateCount = GridSerialRNG::RngStateCount;
|
||||
typedef std::array<RngStateType,RngStateCount> RNGstate;
|
||||
typedef RngStateType word; word w=0;
|
||||
|
||||
uint32_t csum = 0;
|
||||
std::string format = "IEEE32BIG";
|
||||
|
||||
GridBase *grid = parallel._grid;
|
||||
int gsites = grid->gSites();
|
||||
int lsites = grid->lSites();
|
||||
|
||||
uint32_t nersc_csum_tmp;
|
||||
uint32_t scidac_csuma_tmp;
|
||||
uint32_t scidac_csumb_tmp;
|
||||
|
||||
GridStopWatch timer;
|
||||
|
||||
std::cout << GridLogMessage << "RNG read I/O on file " << file << std::endl;
|
||||
|
||||
std::vector<RNGstate> iodata(lsites);
|
||||
csum= IOobject(w,grid,iodata,file,offset,format,BINARYIO_READ|BINARYIO_LEXICOGRAPHIC);
|
||||
IOobject(w,grid,iodata,file,offset,format,BINARYIO_READ|BINARYIO_LEXICOGRAPHIC,
|
||||
nersc_csum,scidac_csuma,scidac_csumb);
|
||||
|
||||
timer.Start();
|
||||
parallel_for(int lidx=0;lidx<lsites;lidx++){
|
||||
@ -520,33 +590,49 @@ class BinaryIO {
|
||||
timer.Stop();
|
||||
|
||||
iodata.resize(1);
|
||||
csum+= IOobject(w,grid,iodata,file,offset,format,BINARYIO_READ|BINARYIO_MASTER_APPEND);
|
||||
IOobject(w,grid,iodata,file,offset,format,BINARYIO_READ|BINARYIO_MASTER_APPEND,
|
||||
nersc_csum_tmp,scidac_csuma_tmp,scidac_csumb_tmp);
|
||||
|
||||
{
|
||||
std::vector<RngStateType> tmp(RngStateCount);
|
||||
std::copy(iodata[0].begin(),iodata[0].end(),tmp.begin());
|
||||
serial.SetState(tmp,0);
|
||||
}
|
||||
|
||||
std::cout << GridLogMessage << "RNG file checksum " << std::hex << csum << std::dec << std::endl;
|
||||
nersc_csum = nersc_csum + nersc_csum_tmp;
|
||||
scidac_csuma = scidac_csuma ^ scidac_csuma_tmp;
|
||||
scidac_csumb = scidac_csumb ^ scidac_csumb_tmp;
|
||||
|
||||
// std::cout << GridLogMessage << "RNG file nersc_checksum " << std::hex << nersc_csum << std::dec << std::endl;
|
||||
// std::cout << GridLogMessage << "RNG file scidac_checksuma " << std::hex << scidac_csuma << std::dec << std::endl;
|
||||
// std::cout << GridLogMessage << "RNG file scidac_checksumb " << std::hex << scidac_csumb << std::dec << std::endl;
|
||||
|
||||
std::cout << GridLogMessage << "RNG state overhead " << timer.Elapsed() << std::endl;
|
||||
return csum;
|
||||
}
|
||||
/////////////////////////////////////////////////////////////////////////////
|
||||
// Write a RNG; lexico map to an array of state and use IOobject
|
||||
//////////////////////////////////////////////////////////////////////////////////////
|
||||
static inline uint32_t writeRNG(GridSerialRNG &serial,GridParallelRNG ¶llel,std::string file,int offset)
|
||||
static inline void writeRNG(GridSerialRNG &serial,
|
||||
GridParallelRNG ¶llel,
|
||||
std::string file,
|
||||
int offset,
|
||||
uint32_t &nersc_csum,
|
||||
uint32_t &scidac_csuma,
|
||||
uint32_t &scidac_csumb)
|
||||
{
|
||||
typedef typename GridSerialRNG::RngStateType RngStateType;
|
||||
typedef RngStateType word; word w=0;
|
||||
const int RngStateCount = GridSerialRNG::RngStateCount;
|
||||
typedef std::array<RngStateType,RngStateCount> RNGstate;
|
||||
|
||||
uint32_t csum = 0;
|
||||
|
||||
GridBase *grid = parallel._grid;
|
||||
int gsites = grid->gSites();
|
||||
int lsites = grid->lSites();
|
||||
|
||||
uint32_t nersc_csum_tmp;
|
||||
uint32_t scidac_csuma_tmp;
|
||||
uint32_t scidac_csumb_tmp;
|
||||
|
||||
GridStopWatch timer;
|
||||
std::string format = "IEEE32BIG";
|
||||
|
||||
@ -561,7 +647,8 @@ class BinaryIO {
|
||||
}
|
||||
timer.Stop();
|
||||
|
||||
csum= IOobject(w,grid,iodata,file,offset,format,BINARYIO_WRITE|BINARYIO_LEXICOGRAPHIC);
|
||||
IOobject(w,grid,iodata,file,offset,format,BINARYIO_WRITE|BINARYIO_LEXICOGRAPHIC,
|
||||
nersc_csum,scidac_csuma,scidac_csumb);
|
||||
|
||||
iodata.resize(1);
|
||||
{
|
||||
@ -569,11 +656,11 @@ class BinaryIO {
|
||||
serial.GetState(tmp,0);
|
||||
std::copy(tmp.begin(),tmp.end(),iodata[0].begin());
|
||||
}
|
||||
csum+= IOobject(w,grid,iodata,file,offset,format,BINARYIO_WRITE|BINARYIO_MASTER_APPEND);
|
||||
IOobject(w,grid,iodata,file,offset,format,BINARYIO_WRITE|BINARYIO_MASTER_APPEND,
|
||||
nersc_csum_tmp,scidac_csuma_tmp,scidac_csumb_tmp);
|
||||
|
||||
std::cout << GridLogMessage << "RNG file checksum " << std::hex << csum << std::dec << std::endl;
|
||||
// std::cout << GridLogMessage << "RNG file checksum " << std::hex << csum << std::dec << std::endl;
|
||||
std::cout << GridLogMessage << "RNG state overhead " << timer.Elapsed() << std::endl;
|
||||
return csum;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
@ -43,201 +43,351 @@ extern "C" { // for linkage
|
||||
#include "lime.h"
|
||||
}
|
||||
|
||||
|
||||
// Unused SCIDAC records names
|
||||
// SCIDAC_PRIVATE_FILE_XML "scidac-private-file-xml"
|
||||
// SCIDAC_SITELIST "scidac-sitelist"
|
||||
// SCIDAC_FILE_XML "scidac-file-xml"
|
||||
// SCIDAC_PRIVATE_RECORD_XML "scidac-private-record-xml"
|
||||
// SCIDAC_RECORD_XML "scidac-record-xml"
|
||||
// SCIDAC_BINARY_DATA "scidac-binary-data"
|
||||
//
|
||||
// Scidac checksum: CRC32 every site, xor reduce some hash of this.
|
||||
// https://github.com/usqcd-software/qio/blob/master/lib/dml/DML_utils.c
|
||||
|
||||
namespace Grid {
|
||||
namespace QCD {
|
||||
|
||||
inline void ILDGGrid(GridBase *grid, ILDGField &header) {
|
||||
assert(grid->_ndimension == 4); // emit error if not
|
||||
header.dimension.resize(4);
|
||||
header.boundary.resize(4);
|
||||
for (int d = 0; d < 4; d++) {
|
||||
header.dimension[d] = grid->_fdimensions[d];
|
||||
// Read boundary conditions from ... ?
|
||||
header.boundary[d] = std::string("periodic");
|
||||
}
|
||||
}
|
||||
|
||||
inline void ILDGChecksum(uint32_t *buf, uint32_t buf_size_bytes,
|
||||
uint32_t &csum) {
|
||||
BinaryIO::Uint32Checksum(buf, buf_size_bytes, csum);
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Utilities ; these are QCD aware
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
template <class GaugeField>
|
||||
inline void ILDGStatistics(GaugeField &data, ILDGField &header) {
|
||||
// How to convert data precision etc...
|
||||
header.link_trace = Grid::QCD::WilsonLoops<PeriodicGimplR>::linkTrace(data);
|
||||
header.plaquette = Grid::QCD::WilsonLoops<PeriodicGimplR>::avgPlaquette(data);
|
||||
// header.polyakov =
|
||||
}
|
||||
|
||||
// Forcing QCD here
|
||||
template <class fobj, class sobj>
|
||||
struct ILDGMunger {
|
||||
void operator()(fobj &in, sobj &out, uint32_t &csum) {
|
||||
for (int mu = 0; mu < 4; mu++) {
|
||||
for (int i = 0; i < 3; i++) {
|
||||
for (int j = 0; j < 3; j++) {
|
||||
out(mu)()(i, j) = in(mu)()(i, j);
|
||||
}
|
||||
}
|
||||
}
|
||||
ILDGChecksum((uint32_t *)&in, sizeof(in), csum);
|
||||
};
|
||||
};
|
||||
|
||||
template <class fobj, class sobj>
|
||||
struct ILDGUnmunger {
|
||||
void operator()(sobj &in, fobj &out, uint32_t &csum) {
|
||||
for (int mu = 0; mu < 4; mu++) {
|
||||
for (int i = 0; i < 3; i++) {
|
||||
for (int j = 0; j < 3; j++) {
|
||||
out(mu)()(i, j) = in(mu)()(i, j);
|
||||
}
|
||||
}
|
||||
}
|
||||
ILDGChecksum((uint32_t *)&out, sizeof(out), csum);
|
||||
};
|
||||
};
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Write and read from fstream; compute header offset for payload
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
enum ILDGstate {ILDGread, ILDGwrite};
|
||||
|
||||
class ILDGIO : public BinaryIO {
|
||||
FILE *File;
|
||||
LimeWriter *LimeW;
|
||||
LimeRecordHeader *LimeHeader;
|
||||
LimeReader *LimeR;
|
||||
std::string filename;
|
||||
|
||||
|
||||
class IldgIO : public BinaryIO {
|
||||
public:
|
||||
ILDGIO(std::string file, ILDGstate RW) {
|
||||
filename = file;
|
||||
if (RW == ILDGwrite){
|
||||
File = fopen(file.c_str(), "w");
|
||||
// check if opened correctly
|
||||
|
||||
LimeW = limeCreateWriter(File);
|
||||
} else {
|
||||
File = fopen(file.c_str(), "r");
|
||||
// check if opened correctly
|
||||
|
||||
LimeR = limeCreateReader(File);
|
||||
}
|
||||
}
|
||||
|
||||
~ILDGIO() { fclose(File); }
|
||||
|
||||
int createHeader(std::string message, int MB, int ME, size_t PayloadSize, LimeWriter* L){
|
||||
static int createHeader(std::string message, int MB, int ME, size_t PayloadSize, LimeWriter* L)
|
||||
{
|
||||
LimeRecordHeader *h;
|
||||
h = limeCreateHeader(MB, ME, const_cast<char *>(message.c_str()), PayloadSize);
|
||||
int status = limeWriteRecordHeader(h, L);
|
||||
if (status < 0) {
|
||||
std::cerr << "ILDG Header error\n";
|
||||
return status;
|
||||
}
|
||||
assert(limeWriteRecordHeader(h, L) >= 0);
|
||||
limeDestroyHeader(h);
|
||||
return LIME_SUCCESS;
|
||||
}
|
||||
|
||||
unsigned int writeHeader(ILDGField &header) {
|
||||
// write header in LIME
|
||||
n_uint64_t nbytes;
|
||||
int MB_flag = 1, ME_flag = 0;
|
||||
|
||||
char message[] = "ildg-format";
|
||||
nbytes = strlen(message);
|
||||
LimeHeader = limeCreateHeader(MB_flag, ME_flag, message, nbytes);
|
||||
limeWriteRecordHeader(LimeHeader, LimeW);
|
||||
limeDestroyHeader(LimeHeader);
|
||||
// save the xml header here
|
||||
// use the xml_writer to c++ streams in pugixml
|
||||
// and convert to char message
|
||||
limeWriteRecordData(message, &nbytes, LimeW);
|
||||
template<class serialisable_object>
static void writeLimeObject(int MB,int ME,serialisable_object &object,std::string object_name,std::string record_name, LimeWriter *LimeW)
{
std::string xmlstring;
{
XmlWriter WR("","");
write(WR,object_name,object);
xmlstring = WR.XmlString();
}
uint64_t nbytes = xmlstring.size();
LimeRecordHeader *h = limeCreateHeader(MB, ME,(char *)record_name.c_str(), nbytes);
assert(limeWriteRecordHeader(h, LimeW)>=0);
assert(limeWriteRecordData(&xmlstring[0], &nbytes, LimeW)>=0);
limeWriterCloseRecord(LimeW);

return 0;
limeDestroyHeader(h);
}
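A hedged usage sketch of the helper above, mirroring the calls made in writeHeader below (the object and field names come from the usqcdInfo struct defined later in this patch):

// Illustrative only: serialise a usqcdInfo object to XML and wrap it in a LIME record.
usqcdInfo info;
info.plaq   = header.plaquette;
info.linktr = header.link_trace;
writeLimeObject(0, 0, info, std::string("usqcdInfo"), std::string(USQCD_INFO), LimeW);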
|
||||
unsigned int readHeader(ILDGField &header) {
|
||||
static unsigned int writeHeader(FieldMetaData &header, LimeWriter *LimeW) {
|
||||
|
||||
uint64_t nbytes;
|
||||
|
||||
ildgFormat ildgfmt ;
|
||||
usqcdInfo info;
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
// Fill ILDG header data struct
|
||||
//////////////////////////////////////////////////////
|
||||
ildgfmt.field = std::string("su3gauge");
|
||||
ildgfmt.precision = 64;
|
||||
ildgfmt.version = 1.0;
|
||||
ildgfmt.lx = header.dimension[0];
|
||||
ildgfmt.ly = header.dimension[1];
|
||||
ildgfmt.lz = header.dimension[2];
|
||||
ildgfmt.lt = header.dimension[3];
|
||||
assert(header.nd==4);
|
||||
assert(header.nd==header.dimension.size());
|
||||
|
||||
info.version=1.0;
|
||||
info.plaq = header.plaquette;
|
||||
info.linktr = header.link_trace;
|
||||
|
||||
// Following scidac file downloaded from NERSC under MILC
// Begin message, keep open on successive records
//Message 1
// Type: scidac-private-file-xml <scidacFile><version>1.1</version><spacetime>4</spacetime><dims>16 16 16 48 </dims><volfmt>0</volfmt></scidacFile>
// Type: scidac-file-xml <title>MILC ILDG archival gauge configuration</title>
//Message 2
// Type: scidac-private-record-xml <scidacRecord><version>1.0</version><date>Thu May 11 00:11:33 2006 UTC</date><globaldata>0</globaldata>
// <datatype>QDP_F3_ColorMatrix</datatype><precision>F</precision><colors>3</colors><typesize>72</typesize><datacount>4</datacount></scidacRecord>
// Type: scidac-record-xml
// Type: ildg-format
// Type: ildg-data-lfn
// Type: ildg-binary-data
// Type: scidac-checksum

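// For orientation, the Grid write path below emits these LIME records, in order
// (as the writeLimeObject/createHeader calls that follow show):
//   1. grid-format        (FieldMetaData serialised to XML)
//   2. usqcdInfo          (plaquette / link trace)
//   3. ildg-format        (lattice geometry and precision)
//   4. ildg-data-lfn      (logical file name)
//   5. ildg-binary-data   (the gauge field payload)
//   6. scidac-checksum    (rotated-CRC32 sums from the binary write)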
writeLimeObject(1,0,header ,std::string("FieldMetaData"),std::string(GRID_FORMAT),LimeW);
|
||||
writeLimeObject(0,0,info ,std::string("usqcdInfo" ),std::string(USQCD_INFO ),LimeW);
|
||||
writeLimeObject(0,0,ildgfmt,std::string("ildgFormat") ,std::string(ILDG_FORMAT),LimeW);
|
||||
// LFN is not a serializable object
|
||||
{
|
||||
std::string LFN = header.ildg_lfn;
|
||||
uint64_t PayloadSize = LFN.size();
|
||||
createHeader(ILDG_DATA_LFN, 0 , 0, PayloadSize, LimeW);
|
||||
limeWriteRecordData(const_cast<char*>(LFN.c_str()), &PayloadSize, LimeW);
|
||||
limeWriterCloseRecord(LimeW);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
template <class vsimd>
|
||||
uint32_t readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu) {
|
||||
typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;
|
||||
typedef LorentzColourMatrixD sobjd;
|
||||
typedef LorentzColourMatrixF sobjf;
|
||||
typedef iLorentzColourMatrix<vsimd> itype;
|
||||
typedef LorentzColourMatrix sobj;
|
||||
GridBase *grid = Umu._grid;
|
||||
static void writeConfiguration(std::string filename,Lattice<iLorentzColourMatrix<vsimd> > &Umu, std::string format) {
|
||||
|
||||
ILDGField header;
|
||||
readHeader(header);
|
||||
FILE *File = fopen(filename.c_str(), "w");
|
||||
LimeWriter *LimeW = limeCreateWriter(File);
|
||||
|
||||
// now just the conf, ignore the header
|
||||
std::string format = std::string("IEEE64BIG");
|
||||
do {limeReaderNextRecord(LimeR);}
|
||||
while (strncmp(limeReaderType(LimeR), "ildg-binary-data",16));
|
||||
|
||||
n_uint64_t nbytes = limeReaderBytes(LimeR);//size of this record (configuration)
|
||||
|
||||
|
||||
ILDGtype ILDGt(true, LimeR);
|
||||
// this is special for double prec data, just for the moment
|
||||
uint32_t csum = BinaryIO::readObjectParallel< itype, sobjd >(
|
||||
Umu, filename, ILDGMunger<sobjd, sobj>(), 0, format, ILDGt);
|
||||
|
||||
// Check configuration
|
||||
// todo
|
||||
|
||||
return csum;
|
||||
}
|
||||
|
||||
template <class vsimd>
|
||||
uint32_t writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu, std::string format) {
|
||||
typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;
|
||||
typedef iLorentzColourMatrix<vsimd> vobj;
|
||||
typedef typename vobj::scalar_object sobj;
|
||||
typedef LorentzColourMatrixD fobj;
|
||||
|
||||
ILDGField header;
|
||||
// fill the header
|
||||
GridBase * grid = Umu._grid;
|
||||
|
||||
////////////////////////////////////////
|
||||
// fill the headers
|
||||
////////////////////////////////////////
|
||||
FieldMetaData header;
|
||||
|
||||
GridMetaData(grid,header);
|
||||
GaugeStatistics<GaugeField>(Umu,header);
|
||||
MachineCharacteristics(header);
|
||||
|
||||
assert( (format=="IEEE64BIG") || (format=="IEEE32BIG"));
|
||||
header.floating_point = format;
|
||||
header.checksum = 0x0; // unused in ILDG
|
||||
writeHeader(header,LimeW);
|
||||
|
||||
ILDGUnmunger<fobj, sobj> munge;
|
||||
unsigned int offset = writeHeader(header);
|
||||
|
||||
BinaryIO::Uint32Checksum<vobj, fobj>(Umu, munge, header.checksum);
|
||||
|
||||
////////////////////////////////////////
|
||||
// Write data record header
|
||||
n_uint64_t PayloadSize = sizeof(fobj) * Umu._grid->_gsites;
|
||||
createHeader("ildg-binary-data", 0, 1, PayloadSize, LimeW);
|
||||
|
||||
ILDGtype ILDGt(true, LimeW);
|
||||
uint32_t csum = BinaryIO::writeObjectParallel<vobj, fobj>(
|
||||
Umu, filename, munge, 0, header.floating_point, ILDGt);
|
||||
////////////////////////////////////////
|
||||
uint64_t PayloadSize = sizeof(fobj) * Umu._grid->_gsites;
|
||||
createHeader(ILDG_BINARY_DATA, 0, 0, PayloadSize, LimeW);
|
||||
|
||||
off_t offset = ftell(File);
|
||||
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
|
||||
GaugeSimpleMunger<sobj, fobj> munge;
|
||||
BinaryIO::writeLatticeObject<vobj, fobj >(Umu, filename, munge, offset, header.floating_point,
|
||||
nersc_csum,scidac_csuma,scidac_csumb);
|
||||
limeWriterCloseRecord(LimeW);
|
||||
|
||||
// Last record
|
||||
// the logical file name LNF
|
||||
// look into documentation on how to generate this string
|
||||
std::string LNF = "empty";
|
||||
////////////////////////////////////////
|
||||
// Write checksum element, propagating forward from the BinaryIO
|
||||
////////////////////////////////////////
|
||||
scidacChecksum checksum;
checksum.suma= scidac_csuma;
checksum.sumb= scidac_csumb;
// std::cout << " writing scidac checksums "<<std::hex<<scidac_csuma<<"/"<<scidac_csumb<<std::dec<<std::endl;
writeLimeObject(0,1,checksum,std::string("scidacChecksum" ),std::string(SCIDAC_CHECKSUM),LimeW);

fclose(File);
}
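A hedged call-site sketch for the new static write interface; the file name is illustrative and not taken from the commit:

// Illustrative only:
LatticeGaugeField Umu(grid);
IldgIO::writeConfiguration("ckpoint_ildg.1000", Umu, std::string("IEEE64BIG"));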
|
||||
|
||||
template <class vsimd>
|
||||
static void readConfiguration(std::string filename,Lattice<iLorentzColourMatrix<vsimd> > &Umu, FieldMetaData &FieldMetaData_) {
|
||||
|
||||
typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;
|
||||
typedef LorentzColourMatrixD sobjd;
|
||||
typedef LorentzColourMatrixF sobjf;
|
||||
typedef iLorentzColourMatrix<vsimd> itype;
|
||||
typedef LorentzColourMatrix sobj;
|
||||
|
||||
GridBase *grid = Umu._grid;
|
||||
|
||||
std::vector<int> dims = Umu._grid->FullDimensions();
|
||||
assert(dims.size()==4);
|
||||
|
||||
FILE *File = fopen(filename.c_str(), "r");
|
||||
LimeReader *LimeR = limeCreateReader(File);
|
||||
|
||||
|
||||
PayloadSize = sizeof(LNF);
|
||||
createHeader("ildg-binary-lfn", 1 , 1, PayloadSize, LimeW);
|
||||
limeWriteRecordData(const_cast<char*>(LNF.c_str()), &PayloadSize, LimeW);
|
||||
// Metadata holders
|
||||
ildgFormat ildgFormat_ ;
|
||||
std::string ildgLFN_ ;
|
||||
scidacChecksum scidacChecksum_;
|
||||
usqcdInfo usqcdInfo_ ;
|
||||
|
||||
limeWriterCloseRecord(LimeW);
|
||||
// track what we read from file
|
||||
int found_ildgFormat =0;
|
||||
int found_ildgLFN =0;
|
||||
int found_scidacChecksum=0;
|
||||
int found_usqcdInfo =0;
|
||||
int found_ildgBinary =0;
|
||||
int found_FieldMetaData =0;
|
||||
|
||||
return csum;
|
||||
uint32_t nersc_csum;
|
||||
uint32_t scidac_csuma;
|
||||
uint32_t scidac_csumb;
|
||||
|
||||
// Binary format
|
||||
std::string format;
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
// Loop over all records
|
||||
// -- Order is poorly guaranteed except ILDG header precedes binary section.
|
||||
// -- Run like an event loop.
|
||||
// -- Impose trust hierarchy. Grid takes precedence & look for ILDG, and failing
|
||||
// that Scidac.
|
||||
// -- Insist on Scidac checksum record.
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
|
||||
while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) {
|
||||
|
||||
uint64_t nbytes = limeReaderBytes(LimeR);//size of this record (configuration)
|
||||
|
||||
//////////////////////////////////////////////////////////////////
|
||||
// If not BINARY_DATA read a string and parse
|
||||
//////////////////////////////////////////////////////////////////
|
||||
if ( strncmp(limeReaderType(LimeR), ILDG_BINARY_DATA,strlen(ILDG_BINARY_DATA) ) ) {
|
||||
|
||||
// Copy out the string
|
||||
std::vector<char> xmlc(nbytes+1,'\0');
|
||||
limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR);
|
||||
std::cout << GridLogMessage<< "Non binary record :" <<limeReaderType(LimeR) <<std::endl; //<<"\n"<<(&xmlc[0])<<std::endl;
|
||||
|
||||
//////////////////////////////////
|
||||
// ILDG format record
|
||||
if ( !strncmp(limeReaderType(LimeR), ILDG_FORMAT,strlen(ILDG_FORMAT)) ) {
|
||||
|
||||
XmlReader RD(&xmlc[0],"");
|
||||
read(RD,"ildgFormat",ildgFormat_);
|
||||
|
||||
if ( ildgFormat_.precision == 64 ) format = std::string("IEEE64BIG");
|
||||
if ( ildgFormat_.precision == 32 ) format = std::string("IEEE32BIG");
|
||||
|
||||
// std::cout << "This is an ILDG format record : "<<format<<std::endl;
|
||||
|
||||
assert( ildgFormat_.lx == dims[0]);
|
||||
assert( ildgFormat_.ly == dims[1]);
|
||||
assert( ildgFormat_.lz == dims[2]);
|
||||
assert( ildgFormat_.lt == dims[3]);
|
||||
|
||||
found_ildgFormat = 1;
|
||||
}
|
||||
|
||||
if ( !strncmp(limeReaderType(LimeR), ILDG_DATA_LFN,strlen(ILDG_DATA_LFN)) ) {
|
||||
FieldMetaData_.ildg_lfn = std::string(&xmlc[0]);
|
||||
// std::cout << "ILDG logical file name "<< FieldMetaData_.ildg_lfn << std::endl;
|
||||
found_ildgLFN = 1;
|
||||
}
|
||||
|
||||
if ( !strncmp(limeReaderType(LimeR), GRID_FORMAT,strlen(ILDG_FORMAT)) ) {
|
||||
|
||||
XmlReader RD(&xmlc[0],"");
|
||||
read(RD,"FieldMetaData",FieldMetaData_);
|
||||
|
||||
// std::cout << "Grid header found : format is "<<FieldMetaData_.floating_point<<std::endl;
|
||||
|
||||
format = FieldMetaData_.floating_point;
|
||||
|
||||
assert(FieldMetaData_.dimension[0] == dims[0]);
|
||||
assert(FieldMetaData_.dimension[1] == dims[1]);
|
||||
assert(FieldMetaData_.dimension[2] == dims[2]);
|
||||
assert(FieldMetaData_.dimension[3] == dims[3]);
|
||||
|
||||
found_FieldMetaData = 1;
|
||||
}
|
||||
|
||||
if ( !strncmp(limeReaderType(LimeR), USQCD_INFO,strlen(USQCD_INFO)) ) {
|
||||
XmlReader RD(&xmlc[0],"");
|
||||
read(RD,USQCD_INFO,usqcdInfo_);
|
||||
// std::cout << "USQCD info record found " <<std::endl;
|
||||
found_usqcdInfo = 1;
|
||||
}
|
||||
|
||||
if ( !strncmp(limeReaderType(LimeR), SCIDAC_CHECKSUM,strlen(SCIDAC_CHECKSUM)) ) {
|
||||
XmlReader RD(&xmlc[0],"");
|
||||
read(RD,"scidacChecksum",scidacChecksum_);
|
||||
FieldMetaData_.scidac_checksuma = scidacChecksum_.suma;
|
||||
FieldMetaData_.scidac_checksumb = scidacChecksum_.sumb;
|
||||
//std::cout << " Read Out "<<scidacChecksum_.version<<"/"<< scidacChecksum_.suma<<"/"<<scidacChecksum_.sumb<<std::endl;
|
||||
found_scidacChecksum = 1;
|
||||
}
|
||||
|
||||
} else {
|
||||
/////////////////////////////////
|
||||
// Binary data
|
||||
/////////////////////////////////
|
||||
std::cout << GridLogMessage << ILDG_BINARY_DATA << std::endl;
|
||||
off_t offset= ftell(File);
|
||||
GaugeSimpleMunger<sobjd, sobj> munge;
|
||||
BinaryIO::readLatticeObject< itype, sobjd >(Umu, filename, munge, offset, format,
|
||||
nersc_csum,scidac_csuma,scidac_csumb);
|
||||
found_ildgBinary = 1;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
// Minimally must find binary segment and checksum
|
||||
//////////////////////////////////////////////////////
|
||||
assert(found_ildgBinary);
|
||||
assert(found_scidacChecksum);
|
||||
|
||||
// Must find something with the lattice dimensions
|
||||
assert(found_FieldMetaData||found_ildgFormat);
|
||||
|
||||
if ( found_FieldMetaData ) {
|
||||
|
||||
std::cout << GridLogMessage<<"a Grid MetaData was record found: configuration was probably written by Grid ! Yay ! "<<std::endl;
|
||||
// std::cout << "Read Grid Plaqette "<<FieldMetaData_.plaquette<<std::endl;
|
||||
// std::cout << "Read Grid LinkTrace "<<FieldMetaData_.link_trace<<std::endl;
|
||||
|
||||
} else {
|
||||
|
||||
assert(found_ildgFormat);
|
||||
assert ( ildgFormat_.field == std::string("su3gauge") );
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////
|
||||
// Populate our Grid metadata as best we can
|
||||
///////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
std::ostringstream vers; vers << ildgFormat_.version;
|
||||
FieldMetaData_.hdr_version = vers.str();
|
||||
FieldMetaData_.data_type = std::string("4D_SU3_GAUGE_3X3");
|
||||
|
||||
assert(FieldMetaData_.nd==4);
|
||||
assert(FieldMetaData_.dimension.size()==4);
|
||||
|
||||
FieldMetaData_.dimension[0] = ildgFormat_.lx ;
|
||||
FieldMetaData_.dimension[1] = ildgFormat_.ly ;
|
||||
FieldMetaData_.dimension[2] = ildgFormat_.lz ;
|
||||
FieldMetaData_.dimension[3] = ildgFormat_.lt ;
|
||||
|
||||
if ( found_usqcdInfo ) {
|
||||
FieldMetaData_.plaquette = usqcdInfo_.plaq;
|
||||
FieldMetaData_.link_trace= usqcdInfo_.linktr;
|
||||
// std::cout << "This configuration was probably written by USQCD and not Grid "<<std::endl;
|
||||
// std::cout << "Read USQCD Plaquette "<<FieldMetaData_.plaquette<<std::endl;
|
||||
// std::cout << "Read USQCD LinkTrace "<<FieldMetaData_.link_trace<<std::endl;
|
||||
} else {
|
||||
FieldMetaData_.plaquette = 0.0;
|
||||
FieldMetaData_.link_trace= 0.0;
|
||||
std::cout << "Uhoh... This configuration is unsafe and contains no recognised checksum or physics records that can verify it !!! "<<std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
if ( found_scidacChecksum ) {
|
||||
assert( scidac_csuma ==FieldMetaData_.scidac_checksuma);
|
||||
assert( scidac_csumb ==FieldMetaData_.scidac_checksumb);
|
||||
std::cout << GridLogMessage<<"SciDAC checksums match " << std::endl;
|
||||
}
|
||||
|
||||
if ( found_FieldMetaData || found_usqcdInfo ) {
FieldMetaData checker;
GaugeStatistics<GaugeField>(Umu,checker);
assert(fabs(checker.plaquette - FieldMetaData_.plaquette )<1.0e-5);
assert(fabs(checker.link_trace - FieldMetaData_.link_trace)<1.0e-5);
std::cout << GridLogMessage<<"Plaquette and link trace match " << std::endl;
}
}
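And the matching read path, again as a hedged sketch with an illustrative file name:

// Illustrative only:
FieldMetaData header;
IldgIO::readConfiguration("ckpoint_ildg.1000", Umu, header);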
|
||||
|
||||
// format for RNG? Now just binary out
|
||||
|
@ -34,47 +34,83 @@ extern "C" { // for linkage
|
||||
|
||||
namespace Grid {
|
||||
|
||||
struct ILDGtype {
|
||||
bool is_ILDG;
|
||||
LimeWriter* LW;
|
||||
LimeReader* LR;
|
||||
#define GRID_FORMAT "grid-format"
|
||||
#define ILDG_FORMAT "ildg-format"
|
||||
#define ILDG_BINARY_DATA "ildg-binary-data"
|
||||
#define ILDG_DATA_LFN "ildg-data-lfn"
|
||||
#define USQCD_INFO "usqcdInfo"
|
||||
#define SCIDAC_CHECKSUM "scidac-checksum"
|
||||
|
||||
ILDGtype(bool is, LimeWriter* L) : is_ILDG(is), LW(L), LR(NULL) {}
|
||||
ILDGtype(bool is, LimeReader* L) : is_ILDG(is), LW(NULL), LR(L) {}
|
||||
ILDGtype() : is_ILDG(false), LW(NULL), LR(NULL) {}
|
||||
/////////////////////////////////////////////////////////////////////////////////
|
||||
// Data representation of records that enter ILDG and SciDac formats
|
||||
/////////////////////////////////////////////////////////////////////////////////
|
||||
struct ildgFormat : Serializable {
|
||||
public:
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(ildgFormat,
|
||||
double, version,
|
||||
std::string, field,
|
||||
int, precision,
|
||||
int, lx,
|
||||
int, ly,
|
||||
int, lz,
|
||||
int, lt);
|
||||
ildgFormat() {
|
||||
version=1.0;
|
||||
};
|
||||
};
|
||||
|
||||
class ILDGField {
|
||||
struct usqcdInfo : Serializable {
|
||||
public:
|
||||
// header strings (not in order)
|
||||
std::vector<int> dimension;
|
||||
std::vector<std::string> boundary;
|
||||
int data_start;
|
||||
std::string hdr_version;
|
||||
std::string storage_format;
|
||||
// Checks on data
|
||||
double link_trace;
|
||||
double plaquette;
|
||||
uint32_t checksum;
|
||||
unsigned int sequence_number;
|
||||
std::string data_type;
|
||||
std::string ensemble_id;
|
||||
std::string ensemble_label;
|
||||
std::string creator;
|
||||
std::string creator_hardware;
|
||||
std::string creation_date;
|
||||
std::string archive_date;
|
||||
std::string floating_point;
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(usqcdInfo,
|
||||
double, version,
|
||||
double, plaq,
|
||||
double, linktr,
|
||||
std::string, info);
|
||||
usqcdInfo() {
|
||||
version=1.0;
|
||||
};
|
||||
};
|
||||
|
||||
struct usqcdPropFile : Serializable {
|
||||
public:
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(usqcdPropFile,
|
||||
double, version,
|
||||
std::string, type,
|
||||
std::string, info);
|
||||
usqcdPropFile() {
|
||||
version=1.0;
|
||||
};
|
||||
};
|
||||
struct usqcdSourceInfo : Serializable {
|
||||
public:
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(usqcdSourceInfo,
|
||||
double, version,
|
||||
std::string, info);
|
||||
usqcdSourceInfo() {
|
||||
version=1.0;
|
||||
};
|
||||
};
|
||||
struct usqcdPropInfo : Serializable {
|
||||
public:
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(usqcdPropInfo,
|
||||
double, version,
|
||||
int, spin,
|
||||
int, color,
|
||||
std::string, info);
|
||||
usqcdPropInfo() {
|
||||
version=1.0;
|
||||
};
|
||||
};
|
||||
struct scidacChecksum : Serializable {
|
||||
public:
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(scidacChecksum,
|
||||
double, version,
|
||||
uint32_t, suma,
|
||||
uint32_t, sumb);
|
||||
scidacChecksum() {
|
||||
version=1.0;
|
||||
suma=sumb=0;
|
||||
};
|
||||
};
|
||||
}
|
||||
#else
|
||||
namespace Grid {
|
||||
|
||||
struct ILDGtype {
|
||||
bool is_ILDG;
|
||||
ILDGtype() : is_ILDG(false) {}
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
@ -30,168 +30,11 @@
|
||||
#ifndef GRID_NERSC_IO_H
|
||||
#define GRID_NERSC_IO_H
|
||||
|
||||
#include <algorithm>
|
||||
#include <iostream>
|
||||
#include <iomanip>
|
||||
#include <fstream>
|
||||
#include <map>
|
||||
|
||||
#include <unistd.h>
|
||||
#include <sys/utsname.h>
|
||||
#include <pwd.h>
|
||||
|
||||
namespace Grid {
|
||||
namespace QCD {
|
||||
|
||||
using namespace Grid;
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Some data types for intermediate storage
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
template<typename vtype> using iLorentzColour2x3 = iVector<iVector<iVector<vtype, Nc>, 2>, 4 >;
|
||||
|
||||
typedef iLorentzColour2x3<Complex> LorentzColour2x3;
|
||||
typedef iLorentzColour2x3<ComplexF> LorentzColour2x3F;
|
||||
typedef iLorentzColour2x3<ComplexD> LorentzColour2x3D;
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// header specification/interpretation
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
class NerscField {
|
||||
public:
|
||||
// header strings (not in order)
|
||||
int dimension[4];
|
||||
std::string boundary[4];
|
||||
int data_start;
|
||||
std::string hdr_version;
|
||||
std::string storage_format;
|
||||
// Checks on data
|
||||
double link_trace;
|
||||
double plaquette;
|
||||
uint32_t checksum;
|
||||
unsigned int sequence_number;
|
||||
std::string data_type;
|
||||
std::string ensemble_id ;
|
||||
std::string ensemble_label ;
|
||||
std::string creator ;
|
||||
std::string creator_hardware ;
|
||||
std::string creation_date ;
|
||||
std::string archive_date ;
|
||||
std::string floating_point;
|
||||
};
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Bit and Physical Checksumming and QA of data
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
|
||||
inline void NerscGrid(GridBase *grid,NerscField &header)
|
||||
{
|
||||
assert(grid->_ndimension==4);
|
||||
for(int d=0;d<4;d++) {
|
||||
header.dimension[d] = grid->_fdimensions[d];
|
||||
}
|
||||
for(int d=0;d<4;d++) {
|
||||
header.boundary[d] = std::string("PERIODIC");
|
||||
}
|
||||
}
|
||||
template<class GaugeField>
|
||||
inline void NerscStatistics(GaugeField & data,NerscField &header)
|
||||
{
|
||||
// How to convert data precision etc...
|
||||
header.link_trace=Grid::QCD::WilsonLoops<PeriodicGimplR>::linkTrace(data);
|
||||
header.plaquette =Grid::QCD::WilsonLoops<PeriodicGimplR>::avgPlaquette(data);
|
||||
}
|
||||
|
||||
inline void NerscMachineCharacteristics(NerscField &header)
|
||||
{
|
||||
// Who
|
||||
struct passwd *pw = getpwuid (getuid());
|
||||
if (pw) header.creator = std::string(pw->pw_name);
|
||||
|
||||
// When
|
||||
std::time_t t = std::time(nullptr);
|
||||
std::tm tm = *std::localtime(&t);
|
||||
std::ostringstream oss;
|
||||
// oss << std::put_time(&tm, "%c %Z");
|
||||
header.creation_date = oss.str();
|
||||
header.archive_date = header.creation_date;
|
||||
|
||||
// What
|
||||
struct utsname name; uname(&name);
|
||||
header.creator_hardware = std::string(name.nodename)+"-";
|
||||
header.creator_hardware+= std::string(name.machine)+"-";
|
||||
header.creator_hardware+= std::string(name.sysname)+"-";
|
||||
header.creator_hardware+= std::string(name.release);
|
||||
|
||||
}
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Utilities ; these are QCD aware
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
inline void reconstruct3(LorentzColourMatrix & cm)
|
||||
{
|
||||
const int x=0;
|
||||
const int y=1;
|
||||
const int z=2;
|
||||
for(int mu=0;mu<4;mu++){
|
||||
cm(mu)()(2,x) = adj(cm(mu)()(0,y)*cm(mu)()(1,z)-cm(mu)()(0,z)*cm(mu)()(1,y)); //x= yz-zy
|
||||
cm(mu)()(2,y) = adj(cm(mu)()(0,z)*cm(mu)()(1,x)-cm(mu)()(0,x)*cm(mu)()(1,z)); //y= zx-xz
|
||||
cm(mu)()(2,z) = adj(cm(mu)()(0,x)*cm(mu)()(1,y)-cm(mu)()(0,y)*cm(mu)()(1,x)); //z= xy-yx
|
||||
}
|
||||
}
|
||||
|
||||
template<class fobj,class sobj>
|
||||
struct NerscSimpleMunger{
|
||||
void operator()(fobj &in, sobj &out) {
|
||||
for (int mu = 0; mu < Nd; mu++) {
|
||||
for (int i = 0; i < Nc; i++) {
|
||||
for (int j = 0; j < Nc; j++) {
|
||||
out(mu)()(i, j) = in(mu)()(i, j);
|
||||
}}
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
template <class fobj, class sobj>
|
||||
struct NerscSimpleUnmunger {
|
||||
|
||||
void operator()(sobj &in, fobj &out) {
|
||||
for (int mu = 0; mu < Nd; mu++) {
|
||||
for (int i = 0; i < Nc; i++) {
|
||||
for (int j = 0; j < Nc; j++) {
|
||||
out(mu)()(i, j) = in(mu)()(i, j);
|
||||
}}
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
template<class fobj,class sobj>
|
||||
struct Nersc3x2munger{
|
||||
|
||||
void operator() (fobj &in,sobj &out){
|
||||
for(int mu=0;mu<4;mu++){
|
||||
for(int i=0;i<2;i++){
|
||||
for(int j=0;j<3;j++){
|
||||
out(mu)()(i,j) = in(mu)(i)(j);
|
||||
}}
|
||||
}
|
||||
reconstruct3(out);
|
||||
}
|
||||
};
|
||||
|
||||
template<class fobj,class sobj>
|
||||
struct Nersc3x2unmunger{
|
||||
|
||||
void operator() (sobj &in,fobj &out){
|
||||
for(int mu=0;mu<4;mu++){
|
||||
for(int i=0;i<2;i++){
|
||||
for(int j=0;j<3;j++){
|
||||
out(mu)(i)(j) = in(mu)()(i,j);
|
||||
}}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Write and read from fstream; compute header offset for payload
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
@ -202,42 +45,17 @@ namespace Grid {
|
||||
std::ofstream fout(file,std::ios::out);
|
||||
}
|
||||
|
||||
#define dump_nersc_header(field, s) \
s << "BEGIN_HEADER" << std::endl; \
s << "HDR_VERSION = " << field.hdr_version << std::endl; \
s << "DATATYPE = " << field.data_type << std::endl; \
s << "STORAGE_FORMAT = " << field.storage_format << std::endl; \
for(int i=0;i<4;i++){ \
s << "DIMENSION_" << i+1 << " = " << field.dimension[i] << std::endl ; \
} \
s << "LINK_TRACE = " << std::setprecision(10) << field.link_trace << std::endl; \
s << "PLAQUETTE = " << std::setprecision(10) << field.plaquette << std::endl; \
for(int i=0;i<4;i++){ \
s << "BOUNDARY_"<<i+1<<" = " << field.boundary[i] << std::endl; \
} \
\
s << "CHECKSUM = "<< std::hex << std::setw(10) << field.checksum << std::dec<<std::endl; \
s << "ENSEMBLE_ID = " << field.ensemble_id << std::endl; \
s << "ENSEMBLE_LABEL = " << field.ensemble_label << std::endl; \
s << "SEQUENCE_NUMBER = " << field.sequence_number << std::endl; \
s << "CREATOR = " << field.creator << std::endl; \
s << "CREATOR_HARDWARE = "<< field.creator_hardware << std::endl; \
s << "CREATION_DATE = " << field.creation_date << std::endl; \
s << "ARCHIVE_DATE = " << field.archive_date << std::endl; \
s << "FLOATING_POINT = " << field.floating_point << std::endl; \
s << "END_HEADER" << std::endl;
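The plain-text header this macro emits has the following shape; the bracketed values are placeholders for the corresponding fields, not real data:

// BEGIN_HEADER
// HDR_VERSION = <hdr_version>
// DATATYPE = <data_type>
// STORAGE_FORMAT = <storage_format>
// DIMENSION_1 = <dimension[0]>
//   ... DIMENSION_4, LINK_TRACE, BOUNDARY_1..4 ...
// PLAQUETTE = <plaquette>
// CHECKSUM = <checksum, in hex>
// FLOATING_POINT = <floating_point>
// END_HEADER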
|
||||
static inline unsigned int writeHeader(NerscField &field,std::string file)
|
||||
static inline unsigned int writeHeader(FieldMetaData &field,std::string file)
|
||||
{
|
||||
std::ofstream fout(file,std::ios::out|std::ios::in);
|
||||
fout.seekp(0,std::ios::beg);
|
||||
dump_nersc_header(field, fout);
|
||||
dump_meta_data(field, fout);
|
||||
field.data_start = fout.tellp();
|
||||
return field.data_start;
|
||||
}
|
||||
|
||||
// for the header-reader
|
||||
static inline int readHeader(std::string file,GridBase *grid, NerscField &field)
|
||||
static inline int readHeader(std::string file,GridBase *grid, FieldMetaData &field)
|
||||
{
|
||||
int offset=0;
|
||||
std::map<std::string,std::string> header;
|
||||
@ -314,14 +132,16 @@ namespace Grid {
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

template<class vsimd>
static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,NerscField& header,std::string file)
static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,
FieldMetaData& header,
std::string file)
{
typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;

GridBase *grid = Umu._grid;
int offset = readHeader(file,Umu._grid,header);

NerscField clone(header);
FieldMetaData clone(header);

std::string format(header.floating_point);

@ -330,34 +150,38 @@ namespace Grid {
int ieee64big = (format == std::string("IEEE64BIG"));
int ieee64 = (format == std::string("IEEE64"));

uint32_t csum;
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
// depending on datatype, set up munger;
// munger is a function of <floating point, Real, data_type>
if ( header.data_type == std::string("4D_SU3_GAUGE") ) {
if ( ieee32 || ieee32big ) {
csum=BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>, LorentzColour2x3F>
(Umu,file,Nersc3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format);
BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>, LorentzColour2x3F>
(Umu,file,Gauge3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format,
nersc_csum,scidac_csuma,scidac_csumb);
}
if ( ieee64 || ieee64big ) {
csum=BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>, LorentzColour2x3D>
(Umu,file,Nersc3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format);
BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>, LorentzColour2x3D>
(Umu,file,Gauge3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format,
nersc_csum,scidac_csuma,scidac_csumb);
}
} else if ( header.data_type == std::string("4D_SU3_GAUGE_3x3") ) {
if ( ieee32 || ieee32big ) {
csum=BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF>
(Umu,file,NerscSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format);
BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF>
(Umu,file,GaugeSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format,
nersc_csum,scidac_csuma,scidac_csumb);
}
if ( ieee64 || ieee64big ) {
csum=BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD>
(Umu,file,NerscSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format);
BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD>
(Umu,file,GaugeSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format,
nersc_csum,scidac_csuma,scidac_csumb);
}
} else {
assert(0);
}

NerscStatistics<GaugeField>(Umu,clone);
GaugeStatistics<GaugeField>(Umu,clone);

std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" checksum "<<std::hex<< csum<< std::dec
std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" checksum "<<std::hex<<nersc_csum<< std::dec
<<" header "<<std::hex<<header.checksum<<std::dec <<std::endl;
std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" plaquette "<<clone.plaquette
<<" header "<<header.plaquette<<std::endl;
@ -369,30 +193,35 @@ namespace Grid {
std::cout << Umu[0]<<std::endl;
std::cout << Umu[1]<<std::endl;
}
if ( csum != header.checksum ) {
if ( nersc_csum != header.checksum ) {
std::cerr << " checksum mismatch " << std::endl;
std::cerr << " plaqs " << clone.plaquette << " " << header.plaquette << std::endl;
std::cerr << " trace " << clone.link_trace<< " " << header.link_trace<< std::endl;
std::cerr << " csum " <<std::hex<< csum << " " << header.checksum<< std::dec<< std::endl;
std::cerr << " nersc_csum " <<std::hex<< nersc_csum << " " << header.checksum<< std::dec<< std::endl;
exit(0);
}
assert(fabs(clone.plaquette -header.plaquette ) < 1.0e-5 );
assert(fabs(clone.link_trace-header.link_trace) < 1.0e-6 );
assert(csum == header.checksum );
assert(nersc_csum == header.checksum );

std::cout<<GridLogMessage <<"NERSC Configuration "<<file<< " and plaquette, link trace, and checksum agree"<<std::endl;
}

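The nersc_csum compared against the header here follows the NERSC convention: the binary payload is treated as a stream of unsigned 32-bit words and summed modulo 2^32, while scidac_csuma/scidac_csumb are independent CRC-based SciDAC checksums carried alongside it. A rough sketch of the word-sum idea only (endianness conversion and the reduction across MPI ranks are omitted; this is not a copy of Grid's BinaryIO code):

  #include <cstdint>
  #include <cstddef>

  // NERSC-style checksum: unsigned 32-bit sum over the payload words.
  inline uint32_t word_sum_checksum(const void *data, std::size_t bytes) {
    const uint32_t *w = static_cast<const uint32_t *>(data);
    uint32_t csum = 0;
    for (std::size_t i = 0; i < bytes / sizeof(uint32_t); ++i) csum += w[i];
    return csum;
  }
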
template<class vsimd>
static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,std::string file, int two_row,int bits32)
static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,
std::string file,
int two_row,
int bits32)
{
typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;

typedef iLorentzColourMatrix<vsimd> vobj;
typedef typename vobj::scalar_object sobj;

FieldMetaData header;
///////////////////////////////////////////
// Following should become arguments
NerscField header;
///////////////////////////////////////////
header.sequence_number = 1;
header.ensemble_id = "UKQCD";
header.ensemble_label = "DWF";
@ -402,32 +231,31 @@ namespace Grid {

GridBase *grid = Umu._grid;

NerscGrid(grid,header);
NerscStatistics<GaugeField>(Umu,header);
NerscMachineCharacteristics(header);
GridMetaData(grid,header);
assert(header.nd==4);
GaugeStatistics<GaugeField>(Umu,header);
MachineCharacteristics(header);

int offset;

truncate(file);

if ( two_row ) {
header.floating_point = std::string("IEEE64BIG");
header.data_type = std::string("4D_SU3_GAUGE");
Nersc3x2unmunger<fobj2D,sobj> munge;
offset = writeHeader(header,file);
header.checksum=BinaryIO::writeLatticeObject<vobj,fobj2D>(Umu,file,munge,offset,header.floating_point);
writeHeader(header,file);
} else {
// Sod it -- always write 3x3 double
header.floating_point = std::string("IEEE64BIG");
header.data_type = std::string("4D_SU3_GAUGE_3x3");
NerscSimpleUnmunger<fobj3D,sobj> munge;
GaugeSimpleUnmunger<fobj3D,sobj> munge;
offset = writeHeader(header,file);
header.checksum=BinaryIO::writeLatticeObject<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point);

uint32_t nersc_csum,scidac_csuma,scidac_csumb;
BinaryIO::writeLatticeObject<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point,
nersc_csum,scidac_csuma,scidac_csumb);
header.checksum = nersc_csum;
writeHeader(header,file);
}

std::cout<<GridLogMessage <<"Written NERSC Configuration on "<< file << " checksum "
<<std::hex<<header.checksum
<<std::dec<<" plaq "<< header.plaquette <<std::endl;

}
///////////////////////////////
// RNG state
@ -437,17 +265,18 @@ namespace Grid {
typedef typename GridParallelRNG::RngStateType RngStateType;

// Following should become arguments
NerscField header;
FieldMetaData header;
header.sequence_number = 1;
header.ensemble_id = "UKQCD";
header.ensemble_label = "DWF";

GridBase *grid = parallel._grid;

NerscGrid(grid,header);
GridMetaData(grid,header);
assert(header.nd==4);
header.link_trace=0.0;
header.plaquette=0.0;
NerscMachineCharacteristics(header);
MachineCharacteristics(header);

int offset;

@ -466,7 +295,9 @@ namespace Grid {

truncate(file);
offset = writeHeader(header,file);
header.checksum = BinaryIO::writeRNG(serial,parallel,file,offset);
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
BinaryIO::writeRNG(serial,parallel,file,offset,nersc_csum,scidac_csuma,scidac_csumb);
header.checksum = nersc_csum;
offset = writeHeader(header,file);

std::cout<<GridLogMessage
@ -476,7 +307,7 @@ namespace Grid {

}

static inline void readRNGState(GridSerialRNG &serial,GridParallelRNG & parallel,NerscField& header,std::string file)
static inline void readRNGState(GridSerialRNG &serial,GridParallelRNG & parallel,FieldMetaData& header,std::string file)
{
typedef typename GridParallelRNG::RngStateType RngStateType;

@ -484,7 +315,7 @@ namespace Grid {

int offset = readHeader(file,grid,header);

NerscField clone(header);
FieldMetaData clone(header);

std::string format(header.floating_point);
std::string data_type(header.data_type);
@ -504,19 +335,19 @@ namespace Grid {

// depending on datatype, set up munger;
// munger is a function of <floating point, Real, data_type>
uint32_t csum=BinaryIO::readRNG(serial,parallel,file,offset);
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
BinaryIO::readRNG(serial,parallel,file,offset,nersc_csum,scidac_csuma,scidac_csumb);

if ( csum != header.checksum ) {
std::cerr << "checksum mismatch "<<std::hex<< csum <<" "<<header.checksum<<std::dec<<std::endl;
if ( nersc_csum != header.checksum ) {
std::cerr << "checksum mismatch "<<std::hex<< nersc_csum <<" "<<header.checksum<<std::dec<<std::endl;
exit(0);
}
assert(csum == header.checksum );
assert(nersc_csum == header.checksum );

std::cout<<GridLogMessage <<"Read NERSC RNG file "<<file<< " format "<< data_type <<std::endl;
}

};

}}
#endif

@ -62,36 +62,50 @@ class BinaryHmcCheckpointer : public BaseHmcCheckpointer<Impl> {
fout.close();
}

void TrajectoryComplete(int traj, Field &U, GridSerialRNG &sRNG,
GridParallelRNG &pRNG) {
void TrajectoryComplete(int traj, Field &U, GridSerialRNG &sRNG, GridParallelRNG &pRNG) {

if ((traj % Params.saveInterval) == 0) {
std::string config, rng;
this->build_filenames(traj, Params, config, rng);

uint32_t nersc_csum;
uint32_t scidac_csuma;
uint32_t scidac_csumb;

BinarySimpleUnmunger<sobj_double, sobj> munge;
truncate(rng);
BinaryIO::writeRNG(sRNG, pRNG, rng, 0);
BinaryIO::writeRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb);
truncate(config);
uint32_t csum = BinaryIO::writeLatticeObject<vobj, sobj_double>(
U, config, munge, 0, Params.format);

BinaryIO::writeLatticeObject<vobj, sobj_double>(U, config, munge, 0, Params.format,
nersc_csum,scidac_csuma,scidac_csumb);

std::cout << GridLogMessage << "Written Binary Configuration " << config
<< " checksum " << std::hex << csum << std::dec << std::endl;
<< " checksum " << std::hex
<< nersc_csum <<"/"
<< scidac_csuma <<"/"
<< scidac_csumb
<< std::dec << std::endl;
}

};

void CheckpointRestore(int traj, Field &U, GridSerialRNG &sRNG,
GridParallelRNG &pRNG) {
void CheckpointRestore(int traj, Field &U, GridSerialRNG &sRNG, GridParallelRNG &pRNG) {
std::string config, rng;
this->build_filenames(traj, Params, config, rng);

BinarySimpleMunger<sobj_double, sobj> munge;
BinaryIO::readRNG(sRNG, pRNG, rng, 0);
uint32_t csum = BinaryIO::readLatticeObject<vobj, sobj_double>(
U, config, munge, 0, Params.format);

uint32_t nersc_csum;
uint32_t scidac_csuma;
uint32_t scidac_csumb;
BinaryIO::readRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb);
BinaryIO::readLatticeObject<vobj, sobj_double>(U, config, munge, 0, Params.format,
nersc_csum,scidac_csuma,scidac_csumb);

std::cout << GridLogMessage << "Read Binary Configuration " << config
<< " checksum " << std::hex << csum << std::dec << std::endl;
<< " checksums " << std::hex << nersc_csum<<"/"<<scidac_csuma<<"/"<<scidac_csumb
<< std::dec << std::endl;
};
};
}

@ -75,12 +75,16 @@ class ILDGHmcCheckpointer : public BaseHmcCheckpointer<Implementation> {
std::string config, rng;
this->build_filenames(traj, Params, config, rng);

ILDGIO IO(config, ILDGwrite);
BinaryIO::writeRNGSerial(sRNG, pRNG, rng, 0);
uint32_t csum = IO.writeConfiguration(U, Params.format);
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
BinaryIO::writeRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb);
IldgIO::writeConfiguration(config,U, Params.format);

std::cout << GridLogMessage << "Written ILDG Configuration on " << config
<< " checksum " << std::hex << csum << std::dec << std::endl;
<< " checksum " << std::hex
<< nersc_csum<<"/"
<< scidac_csuma<<"/"
<< scidac_csumb
<< std::dec << std::endl;
}
};

@ -89,12 +93,18 @@ class ILDGHmcCheckpointer : public BaseHmcCheckpointer<Implementation> {
std::string config, rng;
this->build_filenames(traj, Params, config, rng);

ILDGIO IO(config, ILDGread);
BinaryIO::readRNGSerial(sRNG, pRNG, rng, 0);
uint32_t csum = IO.readConfiguration(U); // format from the header
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
BinaryIO::readRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb);

FieldMetaData header;
IldgIO::readConfiguration(config,U,header); // format from the header

std::cout << GridLogMessage << "Read ILDG Configuration from " << config
<< " checksum " << std::hex << csum << std::dec << std::endl;
<< " checksum " << std::hex
<< nersc_csum<<"/"
<< scidac_csuma<<"/"
<< scidac_csumb
<< std::dec << std::endl;
};
};
}

@ -70,7 +70,7 @@ class NerscHmcCheckpointer : public BaseHmcCheckpointer<Gimpl> {
std::string config, rng;
this->build_filenames(traj, Params, config, rng);

NerscField header;
FieldMetaData header;
NerscIO::readRNGState(sRNG, pRNG, header, rng);
NerscIO::readConfiguration(U, header, config);
};

@ -12,7 +12,4 @@
#include <Grid/qcd/utils/SUnAdjoint.h>
#include <Grid/qcd/utils/SUnTwoIndex.h>



#endif

@ -32,16 +32,21 @@ using namespace Grid;
using namespace std;

// Writer implementation ///////////////////////////////////////////////////////
XmlWriter::XmlWriter(const string &fileName)
: fileName_(fileName)
XmlWriter::XmlWriter(const string &fileName, string toplev) : fileName_(fileName)
{
node_ = doc_.append_child();
node_.set_name("grid");
if ( toplev == std::string("") ) {
node_=doc_;
} else {
node_=doc_.append_child();
node_.set_name(toplev.c_str());
}
}

XmlWriter::~XmlWriter(void)
{
if ( fileName_ != std::string("") ) {
doc_.save_file(fileName_.c_str(), " ");
}
}

void XmlWriter::push(const string &s)
@ -53,21 +58,44 @@ void XmlWriter::pop(void)
{
node_ = node_.parent();
}

// Reader implementation ///////////////////////////////////////////////////////
XmlReader::XmlReader(const string &fileName)
: fileName_(fileName)
std::string XmlWriter::XmlString(void)
{
pugi::xml_parse_result result = doc_.load_file(fileName_.c_str());
std::ostringstream oss;
doc_.save(oss);
return oss.str();
}

if ( !result )
{
XmlReader::XmlReader(const char *xmlstring,string toplev) : fileName_("")
{
pugi::xml_parse_result result;
result = doc_.load_string(xmlstring);
if ( !result ) {
cerr << "XML error description: " << result.description() << "\n";
cerr << "XML error offset : " << result.offset << "\n";
abort();
}
if ( toplev == std::string("") ) {
node_ = doc_;
} else {
node_ = doc_.child(toplev.c_str());
}
}

node_ = doc_.child("grid");
// Reader implementation ///////////////////////////////////////////////////////
XmlReader::XmlReader(const string &fileName,string toplev) : fileName_(fileName)
{
pugi::xml_parse_result result;
result = doc_.load_file(fileName_.c_str());
if ( !result ) {
cerr << "XML error description: " << result.description() << "\n";
cerr << "XML error offset : " << result.offset << "\n";
abort();
}
if ( toplev == std::string("") ) {
node_ = doc_;
} else {
node_ = doc_.child(toplev.c_str());
}
}

bool XmlReader::push(const string &s)

@ -45,9 +45,8 @@ namespace Grid

class XmlWriter: public Writer<XmlWriter>
{

public:
XmlWriter(const std::string &fileName);
XmlWriter(const std::string &fileName,std::string toplev = std::string("grid") );
virtual ~XmlWriter(void);
void push(const std::string &s);
void pop(void);
@ -55,6 +54,7 @@ namespace Grid
void writeDefault(const std::string &s, const U &x);
template <typename U>
void writeDefault(const std::string &s, const std::vector<U> &x);
std::string XmlString(void);
private:
pugi::xml_document doc_;
pugi::xml_node node_;
@ -64,7 +64,8 @@ namespace Grid
class XmlReader: public Reader<XmlReader>
{
public:
XmlReader(const std::string &fileName);
XmlReader(const char *xmlstring,std::string toplev = std::string("grid") );
XmlReader(const std::string &fileName,std::string toplev = std::string("grid") );
virtual ~XmlReader(void) = default;
bool push(const std::string &s);
void pop(void);
@ -118,7 +119,7 @@ namespace Grid
std::string buf;

readDefault(s, buf);
std::cout << s << " " << buf << std::endl;
// std::cout << s << " " << buf << std::endl;
fromString(output, buf);
}

@ -64,8 +64,8 @@ int main (int argc, char ** argv)
std::cout <<GridLogMessage<< " ...done "<<std::endl;

std::string rfile("./ckpoint_rng.4000");
FieldMetaData rngheader;
NerscIO::writeRNGState(sRNGa,pRNGa,rfile);
NerscField rngheader;
NerscIO::readRNGState (sRNGb,pRNGb,rngheader,rfile);

LatticeComplex tmpa(&Fine); random(pRNGa,tmpa);
@ -87,7 +87,7 @@ int main (int argc, char ** argv)

SU3::HotConfiguration(pRNGa,Umu);

NerscField header;
FieldMetaData header;
std::string file("./ckpoint_lat.4000");

int precision32 = 0;

@ -50,7 +50,7 @@ int main (int argc, char ** argv)
LatticeGaugeField Umu(&Fine);
std::vector<LatticeColourMatrix> U(4,&Fine);

NerscField header;
FieldMetaData header;
std::string file("./ckpoint_lat");
NerscIO::readConfiguration(Umu,header,file);

@ -31,6 +31,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>


using namespace Grid;
using namespace Grid::QCD;

GRID_SERIALIZABLE_ENUM(myenum, undef, red, 1, blue, 2, green, 3);

@ -62,6 +63,7 @@ public:
}
};


int16_t i16 = 1;
uint16_t u16 = 2;
int32_t i32 = 3;
@ -238,6 +240,21 @@ int main(int argc,char **argv)
std::cout << jcopy1 << std::endl << jveccopy1 << std::endl;
}

{
ildgFormat format;
format.version =1.0;
format.field =std::string("su3gauge");
format.precision =32;
format.lx =24;
format.ly =24;
format.lz =24;
format.lt =48;
XmlWriter WR("ildg-format.xml","");
XmlWriter WRs("","");
write(WR,"ildgFormat",format);
write(WRs,"ildgFormat",format);
std::cout << " XmlString: " <<WRs.XmlString()<<std::endl;
}
|
||||
// This is still work in progress
|
||||
{
|
||||
|
Loading…
Reference in New Issue
Block a user