//////////////////////////////////////////////////////////////////////////////
// Read a lattice object from file using MPI parallel I/O (MPI-IO).
//
// Every rank opens the file collectively, installs a subarray file view
// selecting its own local sub-lattice out of the global lattice, and pulls
// its portion in with a single collective read.  The raw file records are
// then byte-reversed according to `format`, checksummed and munged into the
// internal scalar representation, and finally scattered into the SIMD
// vectorised layout of the destination field.
//
//  Umu    : destination lattice field (overwritten; zeroed first)
//  file   : path of the binary configuration file
//  munge  : functor converting fobj (file record) -> sobj (scalar site
//           object), accumulating the running checksum
//  offset : byte offset of the binary payload (e.g. past a NERSC header)
//  format : "IEEE32BIG" | "IEEE32" | "IEEE64BIG" | "IEEE64"
//  returns: 32-bit checksum of the file data, globally summed over ranks
//////////////////////////////////////////////////////////////////////////////
template<class vobj,class fobj,class munger>
static inline uint32_t readObjectMPI(Lattice<vobj> &Umu,std::string file,munger munge,int offset,const std::string &format)
{
  typedef typename vobj::scalar_object sobj;

  GridBase *grid = Umu._grid;

  std::cout<< GridLogMessage<< "MPI read I/O "<< file<< std::endl;
  GridStopWatch timer; timer.Start();

  Umu = zero;
  uint32_t csum=0;
  uint64_t bytes=0;

  int ndim   = grid->Dimensions();
  int nrank  = grid->ProcessorCount();
  int myrank = grid->ThisRank();

  std::vector<int> psizes  = grid->ProcessorGrid();
  std::vector<int> pcoor   = grid->ThisProcessorCoor();
  std::vector<int> gLattice= grid->GlobalDimensions();
  std::vector<int> lLattice= grid->LocalDimensions();

  // Retained from the original: these parameterise the MPI_Type_create_darray
  // alternative decomposition; the subarray path below does not consume them.
  std::vector<int> distribs(ndim,MPI_DISTRIBUTE_BLOCK);
  std::vector<int> dargs   (ndim,MPI_DISTRIBUTE_DFLT_DARG);

  std::vector<int> lStart(ndim);
  std::vector<int> gStart(ndim);

  // Flatten the file: one scalar site object per local lattice site.
  int lsites = grid->lSites();
  std::vector<sobj> scalardata(lsites);
  std::vector<fobj> iodata(lsites); // Munge, checksum, byte order in here

  // This rank's origin within the global lattice; the local view starts at 0.
  for(int d=0;d<ndim;d++){
    gStart[d] = lLattice[d]*pcoor[d];
    lStart[d] = 0;
  }

  MPI_Datatype mpiObject;
  MPI_Datatype fileArray;
  MPI_Datatype localArray;
  MPI_Datatype mpiword;
  MPI_Offset   disp = offset;
  MPI_File     fh;
  MPI_Status   status;
  int numword;

  // One site record = numword words of the file's floating point width.
  if ( sizeof( typename vobj::scalar_type ) == sizeof(float) ) {
    numword = sizeof(fobj)/sizeof(float);
    mpiword = MPI_FLOAT;
  } else {
    numword = sizeof(fobj)/sizeof(double);
    mpiword = MPI_DOUBLE;
  }

  //////////////////////////////////////////////////////////////////////////
  // Site object in MPI phrasing.
  //////////////////////////////////////////////////////////////////////////
  int ierr;
  ierr = MPI_Type_contiguous(numword,mpiword,&mpiObject);    assert(ierr==0);
  ierr = MPI_Type_commit(&mpiObject);                        assert(ierr==0);

  //////////////////////////////////////////////////////////////////////////
  // File global array datatype: this rank's block within the full lattice.
  // MPI_ORDER_FORTRAN: dimension 0 runs fastest, matching the lexicographic
  // site ordering the serial reader uses.
  //////////////////////////////////////////////////////////////////////////
  ierr = MPI_Type_create_subarray(ndim,&gLattice[0],&lLattice[0],&gStart[0],
                                  MPI_ORDER_FORTRAN,mpiObject,&fileArray);
  assert(ierr==0);
  ierr = MPI_Type_commit(&fileArray);                        assert(ierr==0);

  //////////////////////////////////////////////////////////////////////////
  // Local lattice array datatype: the whole local volume, origin zero.
  //////////////////////////////////////////////////////////////////////////
  ierr = MPI_Type_create_subarray(ndim,&lLattice[0],&lLattice[0],&lStart[0],
                                  MPI_ORDER_FORTRAN,mpiObject,&localArray);
  assert(ierr==0);
  ierr = MPI_Type_commit(&localArray);                       assert(ierr==0);

  //////////////////////////////////////////////////////////////////////////
  // Collective open, set the file view at `offset`, then one collective
  // read of this rank's entire sub-volume.
  //////////////////////////////////////////////////////////////////////////
  ierr = MPI_File_open(grid->communicator, file.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
  assert(ierr==0);
  ierr = MPI_File_set_view(fh, disp, mpiObject, fileArray, "native", MPI_INFO_NULL);
  // std::cout<< "MPI File set view returned " << ierr << std::endl;
  assert(ierr==0);
  ierr = MPI_File_read_all(fh, &iodata[0], 1, localArray, &status);
  assert(ierr==0);

  // Release MPI resources as soon as the data is in memory.
  // NOTE(review): no close/free survives in the pasted diff; added here so the
  // file handle and derived datatypes do not leak -- confirm against upstream.
  MPI_File_close(&fh);
  MPI_Type_free(&fileArray);
  MPI_Type_free(&localArray);
  MPI_Type_free(&mpiObject);

  //////////////////////////////////////////////////////////////////////////
  // Byte order, checksum and munge into the internal representation.
  // NOTE(review): this span was unreadable in the pasted diff; reconstructed
  // to mirror the serial reader's handling of `format` -- verify against the
  // upstream readObjectSerial implementation.
  //////////////////////////////////////////////////////////////////////////
  int ieee32big = (format == std::string("IEEE32BIG"));
  int ieee32    = (format == std::string("IEEE32"));
  int ieee64big = (format == std::string("IEEE64BIG"));
  int ieee64    = (format == std::string("IEEE64"));

  for(int x=0;x<lsites;x++){
    if (ieee32big) be32toh_v((void *)&iodata[x],sizeof(fobj));
    if (ieee32)    le32toh_v((void *)&iodata[x],sizeof(fobj));
    if (ieee64big) be64toh_v((void *)&iodata[x],sizeof(fobj));
    if (ieee64)    le64toh_v((void *)&iodata[x],sizeof(fobj));
    munge(iodata[x],scalardata[x],csum);
    bytes += sizeof(fobj);
  }

  // The checksum is the global sum of every rank's local contribution.
  grid->GlobalSum(csum);
  grid->Barrier();

  // Scatter the lexicographically ordered scalar sites into the SIMD layout.
  vectorizeFromLexOrdArray(scalardata,Umu);

  timer.Stop();
  grid->GlobalSum(bytes);
  std::cout<< GridLogMessage<< "readObjectMPI read "<< bytes
           << " bytes in "<< timer.Elapsed() << std::endl;

  return csum;
}