
NERSC shakeout of this

paboyle 2017-07-02 14:58:30 -07:00
parent 349d75e483
commit 57002924bc
2 changed files with 37 additions and 36 deletions


@@ -70,7 +70,7 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,
   MPI_Comm_size(parent.communicator,&Nparent);
   int childsize=1;
-  for(int d=0;d<_ndimension;d++) {
+  for(int d=0;d<processors.size();d++) {
     childsize *= processors[d];
   }
   int Nchild = Nparent/childsize;
@@ -83,12 +83,12 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,
   MPI_Comm comm_split;
   if ( Nchild > 1 ) {
-    std::cout << GridLogMessage<<"Child communicator of "<< parent.communicator<<std::endl;
-    std::cout << GridLogMessage<<" parent grid ";
+    std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec<<std::endl;
+    std::cout << GridLogMessage<<" parent grid["<< parent._ndimension<<"] ";
     for(int d=0;d<parent._processors.size();d++) std::cout << parent._processors[d] << " ";
     std::cout<<std::endl;
-    std::cout << GridLogMessage<<" child grid ";
+    std::cout << GridLogMessage<<" child grid["<< _ndimension <<"] ";
     for(int d=0;d<processors.size();d++) std::cout << processors[d] << " ";
     std::cout<<std::endl;


@@ -95,48 +95,49 @@ int main (int argc, char ** argv)
   std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
   emptyUserRecord record;
   std::string file("./scratch.scidac");
+  int me = UGrid->ThisRank();
+  LatticeGaugeField s_Umu(SGrid);
+  FermionField s_src(SFGrid);
+  FermionField s_res(SFGrid);
   {
     ScidacWriter _ScidacWriter;
     _ScidacWriter.open(file);
+    std::cout << GridLogMessage << " Writing out gauge field "<<std::endl;
     _ScidacWriter.writeScidacFieldRecord(Umu,record);
+    for(int n=0;n<nrhs;n++){
+      _ScidacWriter.writeScidacFieldRecord(src[n],record);
+    }
     _ScidacWriter.close();
+    std::cout << GridLogMessage << " Reading in gauge field "<<std::endl;
+    ScidacReader _ScidacReader;
+    _ScidacReader.open(file);
+    _ScidacReader.readScidacFieldRecord(s_Umu,record);
+    _ScidacReader.close();
   }
   {
-    for(int n=0;n<nrhs;n++){
     //////////////////////////////////////////
     // Read back into single rank fields
     //////////////////////////////////////////
-    std::cout << GridLogMessage << " Writing out record "<<n<<std::endl;
-    ScidacWriter _ScidacWriter;
-    _ScidacWriter.open(file);
-    _ScidacWriter.writeScidacFieldRecord(src[n],record);
-    _ScidacWriter.close();
     std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
     std::cout << GridLogMessage << " Reading back in the single process view "<<std::endl;
     std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
-    int me = UGrid->ThisRank();
-    LatticeGaugeField s_Umu(SGrid);
-    FermionField s_src(SFGrid);
-    FermionField s_res(SFGrid);
     {
-      if ( n==me ) {
       ScidacReader _ScidacReader;
       _ScidacReader.open(file);
+      std::cout << GridLogMessage << " Opened file "<<std::endl;
+      _ScidacReader.readScidacFieldRecord(s_Umu,record);
+      std::cout << GridLogMessage << " Read gauge field "<<std::endl;
+      for(int n=0;n<nrhs;n++){
+        if ( n==me ) {
+          std::cout << GridLogMessage << " Read record "<<n<<std::endl;
           _ScidacReader.readScidacFieldRecord(s_src,record);
+        } else {
+          std::cout << GridLogMessage << " Skip record "<<n<<std::endl;
+          _ScidacReader.skipScidacFieldRecord();
+        }
+      }
       _ScidacReader.close();
     }
     FGrid->Barrier();
-    }
   }
   ///////////////////////////////////////////////////////////////
   // Set up N-solvers as trivially parallel
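Note: the hunks above reduce to a standalone pattern — one SciDAC file carrying the gauge field plus all nrhs source records, read back sequentially with skips for unwanted records. Below is a minimal sketch of that pattern against the 2017-era Grid API exactly as it appears in this diff. The grid/RNG boilerplate, Ls, nrhs, the seeds, the field names Umu_chk/src_chk, and the uniform record choice `want` are assumptions for illustration; the test itself keeps n==me per rank, reading into rank-local SGrid/SFGrid fields.

#include <Grid/Grid.h>

using namespace Grid;
using namespace Grid::QCD;

int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);

  // Assumed boilerplate: 4d gauge grid and 5d fermion grid, as in the test.
  const int Ls   = 8;   // assumed fifth-dimension extent
  const int nrhs = 4;   // assumed number of right-hand-side records
  GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
                            GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
  GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);

  std::vector<int> seeds({1,2,3,4});
  GridParallelRNG pRNG(UGrid);  pRNG.SeedFixedIntegers(seeds);
  GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds);

  LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);
  std::vector<LatticeFermion> src(nrhs,FGrid);
  for(int n=0;n<nrhs;n++) random(pRNG5,src[n]);

  emptyUserRecord record;
  std::string file("./scratch.scidac");

  { // One collective write: the gauge field first, then all nrhs records in order.
    ScidacWriter _ScidacWriter;
    _ScidacWriter.open(file);
    _ScidacWriter.writeScidacFieldRecord(Umu,record);
    for(int n=0;n<nrhs;n++) _ScidacWriter.writeScidacFieldRecord(src[n],record);
    _ScidacWriter.close();
  }

  { // Read back: consume the gauge record, keep one record, skip the rest.
    // 'want' is uniform across ranks here so the collective reads agree;
    // the test instead selects per rank, on per-rank split sub-grids.
    int want = 0;
    LatticeGaugeField Umu_chk(UGrid);
    LatticeFermion    src_chk(FGrid);
    ScidacReader _ScidacReader;
    _ScidacReader.open(file);
    _ScidacReader.readScidacFieldRecord(Umu_chk,record);
    for(int n=0;n<nrhs;n++){
      if ( n==want ) _ScidacReader.readScidacFieldRecord(src_chk,record);
      else           _ScidacReader.skipScidacFieldRecord();
    }
    _ScidacReader.close();
  }

  Grid_finalize();
}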