Grid/benchmarks/Benchmark_comms.cc

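// Benchmark of point-to-point halo-exchange bandwidth over Grid's
// CartesianCommunicator. Two variants are timed for a range of local
// lattice sizes L and fifth dimensions Ls:
//   1) concurrent  - all forward/backward transfers in every communicating
//      dimension are posted before a single completion call;
//   2) sequential  - each transfer is completed before the next is posted.
// Times are measured in microseconds, so bytes/time is reported directly as MB/s.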
#include <Grid.h>

using namespace std;
using namespace Grid;
using namespace Grid::QCD;

int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);
  std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplexD::Nsimd());
  std::vector<int> mpi_layout  = GridDefaultMpi();

  int threads = GridThread::GetThreads();
  std::cout << "Grid is setup to use "<<threads<<" threads"<<std::endl;

  int Nloop=10;
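  // Count the dimensions that are actually decomposed over MPI;
  // only these incur halo-exchange communication.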
  int nmu=0;
  for(int mu=0;mu<4;mu++) if (mpi_layout[mu]>1) nmu++;

  std::cout << "===================================================================================================="<<std::endl;
  std::cout << "= Benchmarking concurrent halo exchange in "<<nmu<<" dimensions"<<std::endl;
  std::cout << "===================================================================================================="<<std::endl;
  std::cout << " L "<<"\t\t"<<" Ls "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl;
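  // Concurrent exchange: for each mu with mpi_layout[mu]>1 the forward and
  // backward messages are posted with SendToRecvFromBegin and only then
  // completed together, so all directions are in flight at the same time.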
  for(int lat=4;lat<=32;lat+=2){
    for(int Ls=1;Ls<=16;Ls*=2){

      std::vector<int> latt_size ({lat*mpi_layout[0],
                                   lat*mpi_layout[1],
                                   lat*mpi_layout[2],
                                   lat*mpi_layout[3]});

      GridCartesian Grid(latt_size,simd_layout,mpi_layout);
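      // Eight message buffers per rank: index mu holds the forward-direction
      // payload for dimension mu, index mu+4 the backward one. Each message is
      // one three-dimensional face of lat^3 sites times Ls half spinors.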
      std::vector<std::vector<HalfSpinColourVectorD> > xbuf(8,std::vector<HalfSpinColourVectorD>(lat*lat*lat*Ls));
      std::vector<std::vector<HalfSpinColourVectorD> > rbuf(8,std::vector<HalfSpinColourVectorD>(lat*lat*lat*Ls));

      int ncomm;
      int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD);
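      // Time Nloop sweeps; every transfer is posted before the single
      // completion call, followed by a barrier so all ranks stay in step.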
      double start=usecond();
      for(int i=0;i<Nloop;i++){

        std::vector<CartesianCommunicator::CommsRequest_t> requests;

        ncomm=0;
        for(int mu=0;mu<4;mu++){

          if (mpi_layout[mu]>1 ) {

            ncomm++;
            int comm_proc=1;
            int xmit_to_rank;
            int recv_from_rank;

            // shift by +1 in dimension mu: forward neighbour
            Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
            Grid.SendToRecvFromBegin(requests,
                                     (void *)&xbuf[mu][0],
                                     xmit_to_rank,
                                     (void *)&rbuf[mu][0],
                                     recv_from_rank,
                                     bytes);

            // shift by mpi_layout[mu]-1, i.e. -1 on the periodic torus: backward neighbour
            comm_proc = mpi_layout[mu]-1;
            Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
            Grid.SendToRecvFromBegin(requests,
                                     (void *)&xbuf[mu+4][0],
                                     xmit_to_rank,
                                     (void *)&rbuf[mu+4][0],
                                     recv_from_rank,
                                     bytes);
          }
        }
        Grid.SendToRecvFromComplete(requests);
        Grid.Barrier();
      }
      double stop=usecond();

      double dbytes    = bytes;
      double xbytes    = Nloop*dbytes*2.0*ncomm;  // two sends (forward+backward) per communicating dimension
      double rbytes    = xbytes;                  // every send has a matching receive of the same size
      double bidibytes = xbytes+rbytes;

      double time = stop-start; // microseconds, so bytes/time is MB/s

      std::cout << lat<<"\t\t"<<Ls<<"\t\t"<<bytes<<"\t\t"<<xbytes/time<<"\t\t"<<bidibytes/time<<std::endl;
    }
  }

  std::cout << "===================================================================================================="<<std::endl;
  std::cout << "= Benchmarking sequential halo exchange in "<<nmu<<" dimensions"<<std::endl;
  std::cout << "===================================================================================================="<<std::endl;
  std::cout << " L "<<"\t\t"<<" Ls "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl;
  for(int lat=4;lat<=32;lat+=2){
    for(int Ls=1;Ls<=16;Ls*=2){
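      // Note: unlike the concurrent case above, the global lattice here is not
      // scaled by mpi_layout, but the message size below depends only on lat
      // and Ls, so the per-transfer volume matches the concurrent benchmark.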
      std::vector<int> latt_size ({lat,lat,lat,lat});

      GridCartesian Grid(latt_size,simd_layout,mpi_layout);

      std::vector<std::vector<HalfSpinColourVectorD> > xbuf(8,std::vector<HalfSpinColourVectorD>(lat*lat*lat*Ls));
      std::vector<std::vector<HalfSpinColourVectorD> > rbuf(8,std::vector<HalfSpinColourVectorD>(lat*lat*lat*Ls));

      int ncomm;
      int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD);
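      // Time Nloop sweeps; each SendToRecvFromBegin is matched immediately by
      // its own SendToRecvFromComplete, one direction at a time.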
      double start=usecond();
      for(int i=0;i<Nloop;i++){

        ncomm=0;
        for(int mu=0;mu<4;mu++){

          if (mpi_layout[mu]>1 ) {

            ncomm++;
            int comm_proc=1;
            int xmit_to_rank;
            int recv_from_rank;

            {
              std::vector<CartesianCommunicator::CommsRequest_t> requests;
              Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
              Grid.SendToRecvFromBegin(requests,
                                       (void *)&xbuf[mu][0],
                                       xmit_to_rank,
                                       (void *)&rbuf[mu][0],
                                       recv_from_rank,
                                       bytes);
              Grid.SendToRecvFromComplete(requests);
            }

            comm_proc = mpi_layout[mu]-1;
            {
              std::vector<CartesianCommunicator::CommsRequest_t> requests;
              Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
              Grid.SendToRecvFromBegin(requests,
                                       (void *)&xbuf[mu+4][0],
                                       xmit_to_rank,
                                       (void *)&rbuf[mu+4][0],
                                       recv_from_rank,
                                       bytes);
              Grid.SendToRecvFromComplete(requests);
            }
          }
        }
        Grid.Barrier();
      }
      double stop=usecond();

      double dbytes    = bytes;
      double xbytes    = Nloop*dbytes*2.0*ncomm;
      double rbytes    = xbytes;
      double bidibytes = xbytes+rbytes;

      double time = stop-start;

      std::cout << lat<<"\t\t"<<Ls<<"\t\t"<<bytes<<"\t\t"<<xbytes/time<<"\t\t"<<bidibytes/time<<std::endl;
    }
  }
  Grid_finalize();
}