Mirror of https://github.com/paboyle/Grid.git (synced 2025-06-22 17:52:02 +01:00)

Compare commits: feature/fe ... 4f997c5f04 (30 commits)

Commits (SHA1):
4f997c5f04, 18028f4309, 5164016740, d83beaa890, f9f05e995b, e651b9e7ab,
47b4e91473, 3f31afa4fc, f82ce67624, b52e8ef65a, 2594e3c230, 8cedb45af2,
aa008cbe99, 6fb6ca5b6b, b8ee19691c, ef820a26cd, 5012adfebf, bb5c16b97f,
0d80eeb545, b0f4eee78b, 5340e50427, 0f1c5b08a1, 70988e43d2, aab3bcb46f,
da06d15f73, e8b1251b8c, fad5a74a4b, e83f6a6ae9, 6283d11d50, 6616d5d090
@@ -16,6 +16,7 @@
#include <functional>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <stdio.h>
#include <signal.h>
#include <ctime>
@@ -53,10 +53,11 @@ public:
  // Communicator should know nothing of the physics grid, only processor grid.
  ////////////////////////////////////////////
  int              _Nprocessors;     // How many in all
  Coordinate _processors;      // Which dimensions get relayed out over processors lanes.
  int              _processor;       // linear processor rank
  Coordinate _processor_coor;  // linear processor coordinate
  unsigned long _ndimension;
  Coordinate _shm_processors;  // Which dimensions get relayed out over processors lanes.
  Coordinate _processors;      // Which dimensions get relayed out over processors lanes.
  Coordinate _processor_coor;  // linear processor coordinate
  static Grid_MPI_Comm communicator_world;
  Grid_MPI_Comm             communicator;
  std::vector<Grid_MPI_Comm> communicator_halo;
@@ -97,8 +98,9 @@ public:
  int  BossRank(void) ;
  int  ThisRank(void) ;
  const Coordinate & ThisProcessorCoor(void) ;
  const Coordinate & ShmGrid(void) { return _shm_processors; } ;
  const Coordinate & ProcessorGrid(void) ;
  int  ProcessorCount(void) ;
  int  ProcessorCount(void) ;

  ////////////////////////////////////////////////////////////////////////////////
  // very VERY rarely (Log, serial RNG) we need world without a grid
@@ -142,16 +144,16 @@ public:
                         int bytes);

  double StencilSendToRecvFrom(void *xmit,
                               int xmit_to_rank,
                               int xmit_to_rank,int do_xmit,
                               void *recv,
                               int recv_from_rank,
                               int recv_from_rank,int do_recv,
                               int bytes,int dir);

  double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                    void *xmit,
                                    int xmit_to_rank,
                                    int xmit_to_rank,int do_xmit,
                                    void *recv,
                                    int recv_from_rank,
                                    int recv_from_rank,int do_recv,
                                    int bytes,int dir);
@@ -106,7 +106,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
  // Remap using the shared memory optimising routine
  // The remap creates a comm which must be freed
  ////////////////////////////////////////////////////
  GlobalSharedMemory::OptimalCommunicator    (processors,optimal_comm);
  GlobalSharedMemory::OptimalCommunicator    (processors,optimal_comm,_shm_processors);
  InitFromMPICommunicator(processors,optimal_comm);
  SetCommunicator(optimal_comm);
  ///////////////////////////////////////////////////
@@ -124,12 +124,13 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
  int parent_ndimension = parent._ndimension;  assert(_ndimension >= parent._ndimension);
  Coordinate parent_processor_coor(_ndimension,0);
  Coordinate parent_processors    (_ndimension,1);
  Coordinate shm_processors       (_ndimension,1);
  // Can make 5d grid from 4d etc...
  int pad = _ndimension-parent_ndimension;
  for(int d=0;d<parent_ndimension;d++){
    parent_processor_coor[pad+d]=parent._processor_coor[d];
    parent_processors    [pad+d]=parent._processors[d];
    shm_processors       [pad+d]=parent._shm_processors[d];
  }

  //////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -154,6 +155,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
    ccoor[d] = parent_processor_coor[d] % processors[d];
    scoor[d] = parent_processor_coor[d] / processors[d];
    ssize[d] = parent_processors[d]     / processors[d];
    if ( processors[d] < shm_processors[d] ) shm_processors[d] = processors[d]; // subnode splitting.
  }

  // rank within subcomm ; srank is rank of subcomm within blocks of subcomms
@@ -335,22 +337,22 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
}
// Basic Halo comms primitive
double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
                                                     int dest,
                                                     int dest, int dox,
                                                     void *recv,
                                                     int from,
                                                     int from, int dor,
                                                     int bytes,int dir)
{
  std::vector<CommsRequest_t> list;
  double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,recv,from,bytes,dir);
  double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,dox,recv,from,dor,bytes,dir);
  StencilSendToRecvFromComplete(list,dir);
  return offbytes;
}

double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                         void *xmit,
                                                         int dest,
                                                         int dest,int dox,
                                                         void *recv,
                                                         int from,
                                                         int from,int dor,
                                                         int bytes,int dir)
{
  int ncomm  =communicator_halo.size();
@@ -370,30 +372,33 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
  double off_node_bytes=0.0;
  int tag;

  if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
    tag= dir+from*32;
    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
    assert(ierr==0);
    list.push_back(rrq);
    off_node_bytes+=bytes;
  if ( dox ) {
    if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
      tag= dir+from*32;
      ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
      assert(ierr==0);
      list.push_back(rrq);
      off_node_bytes+=bytes;
    }
  }

  if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
    tag= dir+_processor*32;
    ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
    assert(ierr==0);
    list.push_back(xrq);
    off_node_bytes+=bytes;
  } else {
    // TODO : make a OMP loop on CPU, call threaded bcopy
    void *shm = (void *) this->ShmBufferTranslate(dest,recv);
    assert(shm!=NULL);
    // std::cout <<"acceleratorCopyDeviceToDeviceAsynch"<< std::endl;
    acceleratorCopyDeviceToDeviceAsynch(xmit,shm,bytes);
  if (dor) {
    if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
      tag= dir+_processor*32;
      ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
      assert(ierr==0);
      list.push_back(xrq);
      off_node_bytes+=bytes;
    } else {
      void *shm = (void *) this->ShmBufferTranslate(dest,recv);
      assert(shm!=NULL);
      acceleratorCopyDeviceToDeviceAsynch(xmit,shm,bytes);
    }
  }

  if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
    this->StencilSendToRecvFromComplete(list,dir);
    list.resize(0);
  }

  return off_node_bytes;
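A minimal calling sketch for the revised primitive (not part of the diff; the buffer pointers, ranks and communicator handle are placeholders). The two new flags let a caller suppress either half of the exchange, for example in directions truncated by Dirichlet boundaries:

  std::vector<CommsRequest_t> reqs;
  int dox = 1;   // issue the send half (0 = skip)
  int dor = 1;   // issue the receive half (0 = skip)
  double off_node = comm.StencilSendToRecvFromBegin(reqs,
                                                    send_buf, to_rank,   dox,
                                                    recv_buf, from_rank, dor,
                                                    bytes, dir);
  comm.StencilSendToRecvFromComplete(reqs, dir);   // waits on the MPI requests queued above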
@@ -45,12 +45,14 @@ void CartesianCommunicator::Init(int *argc, char *** arv)
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)
  : CartesianCommunicator(processors)
{
  _shm_processors = Coordinate(processors.size(),1);
  srank=0;
  SetCommunicator(communicator_world);
}

CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
{
  _shm_processors = Coordinate(processors.size(),1);
  _processors = processors;
  _ndimension = processors.size();  assert(_ndimension>=1);
  _processor_coor.resize(_ndimension);
@@ -111,18 +113,18 @@ void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
}

double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
                                                     int xmit_to_rank,
                                                     int xmit_to_rank,int dox,
                                                     void *recv,
                                                     int recv_from_rank,
                                                     int recv_from_rank,int dor,
                                                     int bytes, int dir)
{
  return 2.0*bytes;
}
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                         void *xmit,
                                                         int xmit_to_rank,
                                                         int xmit_to_rank,int dox,
                                                         void *recv,
                                                         int recv_from_rank,
                                                         int recv_from_rank,int dor,
                                                         int bytes, int dir)
{
  return 2.0*bytes;
@@ -93,9 +93,10 @@ public:
  // Create an optimal reordered communicator that makes MPI_Cart_create get it right
  //////////////////////////////////////////////////////////////////////////////////////
  static void Init(Grid_MPI_Comm comm); // Typically MPI_COMM_WORLD
  static void OptimalCommunicator            (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
  static void OptimalCommunicatorHypercube   (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
  static void OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
  // Turns MPI_COMM_WORLD into right layout for Cartesian
  static void OptimalCommunicator            (const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
  static void OptimalCommunicatorHypercube   (const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
  static void OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
  static void GetShmDims(const Coordinate &WorldDims,Coordinate &ShmDims);
  ///////////////////////////////////////////////////
  // Provide shared memory facilities off comm world
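A hedged sketch of calling the revised routine; the extra Coordinate out-parameter now reports the shared-memory (intra-node) decomposition chosen for each dimension (`processors` is the requested processor grid, as in the constructor above):

  Grid_MPI_Comm optimal_comm;
  Coordinate shm_dims(processors.size(), 1);      // filled by the call
  GlobalSharedMemory::OptimalCommunicator(processors, optimal_comm, shm_dims);
  // shm_dims[d] is the number of ranks sharing a node in dimension d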
@@ -152,7 +152,7 @@ int Log2Size(int TwoToPower,int MAXLOG2)
  }
  return log2size;
}
void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
{
  //////////////////////////////////////////////////////////////////////////////
  // Look and see if it looks like an HPE 8600 based on hostname conventions
@@ -165,8 +165,8 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
  gethostname(name,namelen);
  int nscan = sscanf(name,"r%di%dn%d",&R,&I,&N) ;

  if(nscan==3 && HPEhypercube ) OptimalCommunicatorHypercube(processors,optimal_comm);
  else                          OptimalCommunicatorSharedMemory(processors,optimal_comm);
  if(nscan==3 && HPEhypercube ) OptimalCommunicatorHypercube(processors,optimal_comm,SHM);
  else                          OptimalCommunicatorSharedMemory(processors,optimal_comm,SHM);
}
static inline int divides(int a,int b)
{
@@ -221,7 +221,7 @@ void GlobalSharedMemory::GetShmDims(const Coordinate &WorldDims,Coordinate &ShmDims)
    dim=(dim+1) %ndimension;
  }
}
void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
{
  ////////////////////////////////////////////////////////////////
  // Assert power of two shm_size.
@@ -294,7 +294,8 @@ void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
  Coordinate HyperCoor(ndimension);

  GetShmDims(WorldDims,ShmDims);

  SHM = ShmDims;

  ////////////////////////////////////////////////////////////////
  // Establish torus of processes and nodes with sub-blockings
  ////////////////////////////////////////////////////////////////
@@ -341,7 +342,7 @@ void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
  int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
  assert(ierr==0);
}
void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
{
  ////////////////////////////////////////////////////////////////
  // Identify subblock of ranks on node spreading across dims
@@ -353,6 +354,8 @@ void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
  Coordinate ShmCoor(ndimension);  Coordinate NodeCoor(ndimension);  Coordinate WorldCoor(ndimension);

  GetShmDims(WorldDims,ShmDims);
  SHM=ShmDims;

  ////////////////////////////////////////////////////////////////
  // Establish torus of processes and nodes with sub-blockings
  ////////////////////////////////////////////////////////////////
@@ -48,9 +48,10 @@ void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
  _ShmSetup=1;
}

void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
{
  optimal_comm = WorldComm;
  SHM = Coordinate(processors.size(),1);
}

////////////////////////////////////////////////////////////////////////////////////////////
@@ -46,3 +46,4 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/lattice/Lattice_unary.h>
#include <Grid/lattice/Lattice_transfer.h>
#include <Grid/lattice/Lattice_basis.h>
#include <Grid/lattice/Lattice_crc.h>
Grid/lattice/Lattice_crc.h (new file, 55 lines)
@@ -0,0 +1,55 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/lattice/Lattice_crc.h

    Copyright (C) 2021

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#pragma once

NAMESPACE_BEGIN(Grid);

template<class vobj> void DumpSliceNorm(std::string s,Lattice<vobj> &f,int mu=-1)
{
  auto ff = localNorm2(f);
  if ( mu==-1 ) mu = f.Grid()->Nd()-1;
  typedef typename vobj::tensor_reduced normtype;
  typedef typename normtype::scalar_object scalar;
  std::vector<scalar> sff;
  sliceSum(ff,sff,mu);
  for(int t=0;t<sff.size();t++){
    std::cout << s<<" "<<t<<" "<<sff[t]<<std::endl;
  }
}

template<class vobj> uint32_t crc(Lattice<vobj> & buf)
{
  autoView( buf_v , buf, CpuRead);
  return ::crc32(0L,(unsigned char *)&buf_v[0],(size_t)sizeof(vobj)*buf.oSites());
}

#define CRC(U) std::cout << "FingerPrint "<<__FILE__ <<" "<< __LINE__ <<" "<< #U <<" "<<crc(U)<<std::endl;

NAMESPACE_END(Grid);
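A hedged usage sketch for the new helpers (assumes a lattice object already exists and that zlib, which provides ::crc32, is linked):

  LatticeGaugeField U(grid);        // any Lattice<vobj> works
  CRC(U);                           // prints: FingerPrint <file> <line> U <checksum>
  uint32_t fp = crc(U);             // same checksum, without the logging
  DumpSliceNorm("U", U);            // per-slice norm2, along the last dimension by default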
@@ -69,6 +69,7 @@ GridLogger GridLogDebug (1, "Debug", GridLogColours, "PURPLE");
GridLogger GridLogPerformance(1, "Performance", GridLogColours, "GREEN");
GridLogger GridLogIterative  (1, "Iterative", GridLogColours, "BLUE");
GridLogger GridLogIntegrator (1, "Integrator", GridLogColours, "BLUE");
GridLogger GridLogHMC        (1, "HMC", GridLogColours, "BLUE");

void GridLogConfigure(std::vector<std::string> &logstreams) {
  GridLogError.Active(0);
@@ -79,6 +80,7 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
  GridLogPerformance.Active(0);
  GridLogIntegrator.Active(1);
  GridLogColours.Active(0);
  GridLogHMC.Active(1);

  for (int i = 0; i < logstreams.size(); i++) {
    if (logstreams[i] == std::string("Error")) GridLogError.Active(1);
@@ -87,7 +89,8 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
    if (logstreams[i] == std::string("Iterative"))    GridLogIterative.Active(1);
    if (logstreams[i] == std::string("Debug"))        GridLogDebug.Active(1);
    if (logstreams[i] == std::string("Performance"))  GridLogPerformance.Active(1);
    if (logstreams[i] == std::string("Integrator"))   GridLogIntegrator.Active(1);
    if (logstreams[i] == std::string("NoIntegrator")) GridLogIntegrator.Active(0);
    if (logstreams[i] == std::string("NoHMC"))        GridLogHMC.Active(0);
    if (logstreams[i] == std::string("Colours"))      GridLogColours.Active(1);
  }
}
@@ -182,6 +182,7 @@ extern GridLogger GridLogDebug ;
extern GridLogger GridLogPerformance;
extern GridLogger GridLogIterative ;
extern GridLogger GridLogIntegrator ;
extern GridLogger GridLogHMC;
extern Colours    GridLogColours;

std::string demangle(const char* name) ;
@@ -40,6 +40,29 @@ class Action

public:
  bool is_smeared = false;
  RealD deriv_norm_sum;
  RealD deriv_max_sum;
  int   deriv_num;
  RealD deriv_us;
  RealD S_us;
  RealD refresh_us;
  void  reset_timer(void) {
    deriv_us = S_us = refresh_us = 0.0;
    deriv_num=0;
    deriv_norm_sum = deriv_max_sum=0.0;
  }
  void  deriv_log(RealD nrm, RealD max) { deriv_max_sum+=max; deriv_norm_sum+=nrm; deriv_num++;}
  RealD deriv_max_average(void)         { return deriv_max_sum/deriv_num; };
  RealD deriv_norm_average(void)        { return deriv_norm_sum/deriv_num; };
  RealD deriv_timer(void)               { return deriv_us; };
  RealD S_timer(void)                   { return deriv_us; };
  RealD refresh_timer(void)             { return deriv_us; };
  void deriv_timer_start(void)   { deriv_us-=usecond(); }
  void deriv_timer_stop(void)    { deriv_us+=usecond(); }
  void refresh_timer_start(void) { refresh_us-=usecond(); }
  void refresh_timer_stop(void)  { refresh_us+=usecond(); }
  void S_timer_start(void)       { S_us-=usecond(); }
  void S_timer_stop(void)        { S_us+=usecond(); }
  // Heatbath?
  virtual void refresh(const GaugeField& U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) = 0; // refresh pseudofermions
  virtual RealD S(const GaugeField& U) = 0;                             // evaluate the action
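The timers accumulate by subtracting the wall clock (usecond()) at start and adding it back at stop; a short sketch of the intended call pattern around a force evaluation (the `act` pointer and the fields are illustrative):

  act->deriv_timer_start();
  act->deriv(U, force);                      // elapsed time lands in deriv_us
  act->deriv_timer_stop();
  act->deriv_log(force_norm, force_max);     // accumulate per-call force statistics
  RealD avg_max = act->deriv_max_average();  // deriv_max_sum / deriv_num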
@@ -37,6 +37,10 @@ NAMESPACE_CHECK(ActionSet);
#include <Grid/qcd/action/ActionParams.h>
NAMESPACE_CHECK(ActionParams);

#include <Grid/qcd/action/filters/MomentumFilter.h>
#include <Grid/qcd/action/filters/DirichletFilter.h>
#include <Grid/qcd/action/filters/DDHMCFilter.h>

////////////////////////////////////////////
// Gauge Actions
////////////////////////////////////////////
@@ -37,24 +37,32 @@ NAMESPACE_BEGIN(Grid);
// These can move into a params header and be given MacroMagic serialisation
struct GparityWilsonImplParams {
  Coordinate twists;
  GparityWilsonImplParams() : twists(Nd, 0) {};
  Coordinate dirichlet; // Blocksize of dirichlet BCs
  GparityWilsonImplParams() : twists(Nd, 0), dirichlet(Nd, 0) {};
};

struct WilsonImplParams {
  bool overlapCommsCompute;
  Coordinate dirichlet; // Blocksize of dirichlet BCs
  AcceleratorVector<Real,Nd> twist_n_2pi_L;
  AcceleratorVector<Complex,Nd> boundary_phases;
  WilsonImplParams() {
    dirichlet.resize(Nd,0);
    boundary_phases.resize(Nd, 1.0);
    twist_n_2pi_L.resize(Nd, 0.0);
  };
  WilsonImplParams(const AcceleratorVector<Complex,Nd> phi) : boundary_phases(phi), overlapCommsCompute(false) {
    twist_n_2pi_L.resize(Nd, 0.0);
    dirichlet.resize(Nd,0);
  }
};

struct StaggeredImplParams {
  StaggeredImplParams() {};
  Coordinate dirichlet; // Blocksize of dirichlet BCs
  StaggeredImplParams()
  {
    dirichlet.resize(Nd,0);
  };
};

struct OneFlavourRationalParams : Serializable {
@@ -49,6 +49,8 @@ public:

  virtual FermionField &tmp(void) = 0;

  virtual void DirichletBlock(Coordinate & _Block) { assert(0); };

  GridBase * Grid(void)         { return FermionGrid(); };   // this is all the linalg routines need to know
  GridBase * RedBlackGrid(void) { return FermionRedBlackGrid(); };
@@ -75,6 +75,10 @@ public:
  FermionField _tmp;
  FermionField &tmp(void) { return _tmp; }

  int Dirichlet;
  Coordinate Block;

  /********** Deprecate timers **********/
  void Report(void);
  void ZeroCounters(void);
  double DhopCalls;
@@ -173,7 +177,18 @@ public:
                  GridCartesian         &FourDimGrid,
                  GridRedBlackCartesian &FourDimRedBlackGrid,
                  double _M5,const ImplParams &p= ImplParams());

  virtual void DirichletBlock(Coordinate & block)
  {
    assert(block.size()==Nd+1);
    if ( block[0] || block[1] || block[2] || block[3] || block[4] ){
      Dirichlet = 1;
      Block = block;
      Stencil.DirichletBlock(block);
      StencilEven.DirichletBlock(block);
      StencilOdd.DirichletBlock(block);
    }
  }
  // Constructors
  /*
  WilsonFermion5D(int simd,
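A hedged sketch of switching the boundary on from user code (the operator name and block values are illustrative). The block has Nd+1 entries: entry 0 belongs to the fifth dimension, entries 1..4 form the 4d block that is later applied to the gauge field in ImportGauge:

  Coordinate block(Nd+1, 0);
  block[1] = block[2] = block[3] = 16;   // hypothetical 4d sub-block
  block[4] = 16;
  Ddwf.DirichletBlock(block);            // Ddwf: any WilsonFermion5D-derived operator;
                                         // sets Dirichlet=1 and pushes the block into all three stencils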
@@ -60,7 +60,8 @@ WilsonFermion5D<Impl>::WilsonFermion5D(GaugeField &_Umu,
  UmuOdd (_FourDimRedBlackGrid),
  Lebesgue(_FourDimGrid),
  LebesgueEvenOdd(_FourDimRedBlackGrid),
  _tmp(&FiveDimRedBlackGrid)
  _tmp(&FiveDimRedBlackGrid),
  Dirichlet(0)
{
  // some assertions
  assert(FiveDimGrid._ndimension==5);
@@ -218,6 +219,14 @@ void WilsonFermion5D<Impl>::ImportGauge(const GaugeField &_Umu)
{
  GaugeField HUmu(_Umu.Grid());
  HUmu = _Umu*(-0.5);
  if ( Dirichlet ) {
    std::cout << GridLogMessage << " Dirichlet BCs 5d " <<Block<<std::endl;
    Coordinate GaugeBlock(Nd);
    for(int d=0;d<Nd;d++) GaugeBlock[d] = Block[d+1];
    std::cout << GridLogMessage << " Dirichlet BCs 4d " <<GaugeBlock<<std::endl;
    DirichletFilter<GaugeField> Filter(GaugeBlock);
    Filter.applyFilter(HUmu);
  }
  Impl::DoubleStore(GaugeGrid(),Umu,HUmu);
  pickCheckerboard(Even,UmuEven,Umu);
  pickCheckerboard(Odd ,UmuOdd,Umu);
Grid/qcd/action/filters/DDHMCFilter.h (new file, 102 lines)
@@ -0,0 +1,102 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/hmc/integrators/DirichletFilter.h

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution
    directory
*************************************************************************************/
/*  END LEGAL */
//--------------------------------------------------------------------
#pragma once

NAMESPACE_BEGIN(Grid);
////////////////////////////////////////////////////
// DDHMC filter with sub-block size B[mu]
////////////////////////////////////////////////////

template<typename GaugeField>
struct DDHMCFilter: public MomentumFilterBase<GaugeField>
{
  Coordinate Block;
  int Width;

  DDHMCFilter(const Coordinate &_Block,int _Width=2): Block(_Block) { Width=_Width; }

  void applyFilter(GaugeField &U) const override
  {
    GridBase *grid = U.Grid();
    Coordinate Global=grid->GlobalDimensions();
    GaugeField zzz(grid); zzz = Zero();
    LatticeInteger coor(grid);

    auto zzz_mu = PeekIndex<LorentzIndex>(zzz,0);
    ////////////////////////////////////////////////////
    // Zero BDY layers
    ////////////////////////////////////////////////////
    std::cout<<GridLogMessage<<" DDHMC Force Filter Block "<<Block<<" width " <<Width<<std::endl;
    for(int mu=0;mu<Nd;mu++) {

      Integer B1 = Block[mu];
      if ( B1 && (B1 <= Global[mu]) ) {
        LatticeCoordinate(coor,mu);

        ////////////////////////////////
        // OmegaBar - zero all links contained in slice B-1,0 and
        // mu links connecting to Omega
        ////////////////////////////////
        if ( Width==1) {
          U = where(mod(coor,B1)==Integer(B1-1),zzz,U);
          U = where(mod(coor,B1)==Integer(0)   ,zzz,U);
          auto U_mu = PeekIndex<LorentzIndex>(U,mu);
          U_mu = where(mod(coor,B1)==Integer(B1-2),zzz_mu,U_mu);
          PokeIndex<LorentzIndex>(U, U_mu, mu);
        }
        if ( Width==2) {
          U = where(mod(coor,B1)==Integer(B1-2),zzz,U);
          U = where(mod(coor,B1)==Integer(B1-1),zzz,U);
          U = where(mod(coor,B1)==Integer(0)   ,zzz,U);
          U = where(mod(coor,B1)==Integer(1)   ,zzz,U);
          auto U_mu = PeekIndex<LorentzIndex>(U,mu);
          U_mu = where(mod(coor,B1)==Integer(B1-3),zzz_mu,U_mu);
          PokeIndex<LorentzIndex>(U, U_mu, mu);
        }
        if ( Width==3) {
          U = where(mod(coor,B1)==Integer(B1-3),zzz,U);
          U = where(mod(coor,B1)==Integer(B1-2),zzz,U);
          U = where(mod(coor,B1)==Integer(B1-1),zzz,U);
          U = where(mod(coor,B1)==Integer(0)   ,zzz,U);
          U = where(mod(coor,B1)==Integer(1)   ,zzz,U);
          U = where(mod(coor,B1)==Integer(2)   ,zzz,U);
          auto U_mu = PeekIndex<LorentzIndex>(U,mu);
          U_mu = where(mod(coor,B1)==Integer(B1-4),zzz_mu,U_mu);
          PokeIndex<LorentzIndex>(U, U_mu, mu);
        }
      }

    }

  }
};

NAMESPACE_END(Grid);
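A hedged sketch of constructing the filter and applying it to a gauge/force field (the Block and Width values are illustrative):

  Coordinate Block({16,16,16,16});                 // hypothetical sub-domain size per dimension
  DDHMCFilter<LatticeGaugeField> DDHMC(Block, 2);  // Width=2 zeroes two boundary layers
  DDHMC.applyFilter(force);                        // links in the inter-block boundary slices are set to zero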
Grid/qcd/action/filters/DirichletFilter.h (new file, 71 lines)
@@ -0,0 +1,71 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/hmc/integrators/DirichletFilter.h

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution
    directory
*************************************************************************************/
/*  END LEGAL */
//--------------------------------------------------------------------
#pragma once

NAMESPACE_BEGIN(Grid);

template<typename MomentaField>
struct DirichletFilter: public MomentumFilterBase<MomentaField>
{
  typedef typename MomentaField::vector_type vector_type; //SIMD-vectorized complex type
  typedef typename MomentaField::scalar_type scalar_type; //scalar complex type

  typedef iScalar<iScalar<iScalar<vector_type> > > ScalarType; //complex phase for each site

  Coordinate Block;

  DirichletFilter(const Coordinate &_Block): Block(_Block){}

  void applyFilter(MomentaField &P) const override
  {
    GridBase *grid = P.Grid();
    typedef decltype(PeekIndex<LorentzIndex>(P, 0)) LatCM;
    ////////////////////////////////////////////////////
    // Zero strictly links crossing between domains
    ////////////////////////////////////////////////////
    LatticeInteger coor(grid);
    LatCM zz(grid); zz = Zero();
    for(int mu=0;mu<Nd;mu++) {
      if ( (Block[mu]) && (Block[mu] < grid->GlobalDimensions()[mu] ) ) {
        // If costly could provide Grid earlier and precompute masks
        std::cout << GridLogMessage << " Dirichlet in mu="<<mu<<std::endl;
        LatticeCoordinate(coor,mu);
        auto P_mu = PeekIndex<LorentzIndex>(P, mu);
        P_mu = where(mod(coor,Block[mu])==Integer(Block[mu]-1),zz,P_mu);
        PokeIndex<LorentzIndex>(P, P_mu, mu);
      }
    }
  }
};

NAMESPACE_END(Grid);
@@ -129,18 +129,10 @@ public:
    Runner(S);
  }

  //////////////////////////////////////////////////////////////////

private:
  template <class SmearingPolicy>
  void Runner(SmearingPolicy &Smearing) {
    auto UGrid = Resources.GetCartesian();
    Resources.AddRNGs();
    Field U(UGrid);

    // Can move this outside?
    typedef IntegratorType<SmearingPolicy> TheIntegrator;
    TheIntegrator MDynamics(UGrid, Parameters.MD, TheAction, Smearing);
  //Use the checkpointer to initialize the RNGs and the gauge field, writing the resulting gauge field into U.
  //This is called automatically by Run but may be useful elsewhere, e.g. for integrator tuning experiments
  void initializeGaugeFieldAndRNGs(Field &U){
    if(!Resources.haveRNGs()) Resources.AddRNGs();

    if (Parameters.StartingType == "HotStart") {
      // Hot start
@@ -167,6 +159,25 @@ private:
                << "Valid [HotStart, ColdStart, TepidStart, CheckpointStart]\n";
      exit(1);
    }
  }

  //////////////////////////////////////////////////////////////////

private:
  template <class SmearingPolicy>
  void Runner(SmearingPolicy &Smearing) {
    auto UGrid = Resources.GetCartesian();
    Field U(UGrid);

    initializeGaugeFieldAndRNGs(U);

    typedef IntegratorType<SmearingPolicy> TheIntegrator;
    TheIntegrator MDynamics(UGrid, Parameters.MD, TheAction, Smearing);

    // Sets the momentum filter
    MDynamics.setMomentumFilter(*(Resources.GetMomentumFilter()));

    Smearing.set_Field(U);
@@ -34,6 +34,7 @@ directory
 * @brief Classes for Hybrid Monte Carlo update
 *
 * @author Guido Cossu
 * @author Peter Boyle
 */
//--------------------------------------------------------------------
#pragma once
@@ -115,22 +116,17 @@ private:

    random(sRNG, rn_test);

    std::cout << GridLogMessage
              << "--------------------------------------------------\n";
    std::cout << GridLogMessage << "exp(-dH) = " << prob
              << "  Random = " << rn_test << "\n";
    std::cout << GridLogMessage
              << "Acc. Probability = " << ((prob < 1.0) ? prob : 1.0) << "\n";
    std::cout << GridLogHMC << "--------------------------------------------------\n";
    std::cout << GridLogHMC << "exp(-dH) = " << prob << "  Random = " << rn_test << "\n";
    std::cout << GridLogHMC << "Acc. Probability = " << ((prob < 1.0) ? prob : 1.0) << "\n";

    if ((prob > 1.0) || (rn_test <= prob)) {  // accepted
      std::cout << GridLogMessage << "Metropolis_test -- ACCEPTED\n";
      std::cout << GridLogMessage
                << "--------------------------------------------------\n";
      std::cout << GridLogHMC << "Metropolis_test -- ACCEPTED\n";
      std::cout << GridLogHMC << "--------------------------------------------------\n";
      return true;
    } else {  // rejected
      std::cout << GridLogMessage << "Metropolis_test -- REJECTED\n";
      std::cout << GridLogMessage
                << "--------------------------------------------------\n";
      std::cout << GridLogHMC << "Metropolis_test -- REJECTED\n";
      std::cout << GridLogHMC << "--------------------------------------------------\n";
      return false;
    }
  }
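The test is the standard Metropolis accept/reject step: with prob = exp(-dH) and rn_test drawn uniformly, the update is accepted when prob > 1 (i.e. dH < 0) or rn_test <= prob. A compact restatement of the same logic:

  RealD prob = std::exp(-DeltaH);
  RealD rn_test;
  random(sRNG, rn_test);                            // uniform draw
  bool accept = (prob > 1.0) || (rn_test <= prob);  // downhill moves are always accepted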
@@ -139,19 +135,68 @@ private:
  // Evolution
  /////////////////////////////////////////////////////////
  RealD evolve_hmc_step(Field &U) {
    TheIntegrator.refresh(U, sRNG, pRNG);  // set U and initialize P and phi's

    RealD H0 = TheIntegrator.S(U);  // initial state action
    GridBase *Grid = U.Grid();

    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // Mainly for DDHMC perform a random translation of U modulo volume
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    std::cout << GridLogMessage << "--------------------------------------------------\n";
    std::cout << GridLogMessage << "Random shifting gauge field by [";
    for(int d=0;d<Grid->Nd();d++) {

      int L = Grid->GlobalDimensions()[d];

      RealD rn_uniform; random(sRNG, rn_uniform);

      int shift = (int) (rn_uniform*L);

      std::cout << shift;
      if(d<Grid->Nd()-1) std::cout <<",";
      else               std::cout <<"]\n";

      U = Cshift(U,d,shift);
    }
    std::cout << GridLogMessage << "--------------------------------------------------\n";

    TheIntegrator.reset_timer();

    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // set U and initialize P and phi's
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    std::cout << GridLogMessage << "--------------------------------------------------\n";
    std::cout << GridLogMessage << "Refresh momenta and pseudofermions";
    TheIntegrator.refresh(U, sRNG, pRNG);
    std::cout << GridLogMessage << "--------------------------------------------------\n";

    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // initial state action
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    std::cout << GridLogMessage << "--------------------------------------------------\n";
    std::cout << GridLogMessage << "Compute initial action";
    RealD H0 = TheIntegrator.S(U);
    std::cout << GridLogMessage << "--------------------------------------------------\n";

    std::streamsize current_precision = std::cout.precision();
    std::cout.precision(15);
    std::cout << GridLogMessage << "Total H before trajectory = " << H0 << "\n";
    std::cout << GridLogHMC << "Total H before trajectory = " << H0 << "\n";
    std::cout.precision(current_precision);

    std::cout << GridLogMessage << "--------------------------------------------------\n";
    std::cout << GridLogMessage << " Molecular Dynamics evolution ";
    TheIntegrator.integrate(U);
    std::cout << GridLogMessage << "--------------------------------------------------\n";

    RealD H1 = TheIntegrator.S(U);  // updated state action
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // updated state action
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    std::cout << GridLogMessage << "--------------------------------------------------\n";
    std::cout << GridLogMessage << "Compute final action";
    RealD H1 = TheIntegrator.S(U);
    std::cout << GridLogMessage << "--------------------------------------------------\n";


    ///////////////////////////////////////////////////////////
    if(0){
      std::cout << "------------------------- Reversibility test" << std::endl;
@@ -163,17 +208,16 @@ private:
    }
    ///////////////////////////////////////////////////////////

    std::cout.precision(15);
    std::cout << GridLogMessage << "Total H after trajectory  = " << H1
              << "  dH = " << H1 - H0 << "\n";

    std::cout << GridLogHMC << "--------------------------------------------------\n";
    std::cout << GridLogHMC << "Total H after trajectory  = " << H1 << "  dH = " << H1 - H0 << "\n";
    std::cout << GridLogHMC << "--------------------------------------------------\n";

    std::cout.precision(current_precision);

    return (H1 - H0);
  }

public:
  /////////////////////////////////////////
@@ -195,10 +239,13 @@ public:

    // Actual updates (evolve a copy Ucopy then copy back eventually)
    unsigned int FinalTrajectory = Params.Trajectories + Params.NoMetropolisUntil + Params.StartTrajectory;

    for (int traj = Params.StartTrajectory; traj < FinalTrajectory; ++traj) {
      std::cout << GridLogMessage << "-- # Trajectory = " << traj << "\n";

      std::cout << GridLogHMC << "-- # Trajectory = " << traj << "\n";

      if (traj < Params.StartTrajectory + Params.NoMetropolisUntil) {
        std::cout << GridLogMessage << "-- Thermalization" << std::endl;
        std::cout << GridLogHMC << "-- Thermalization" << std::endl;
      }

      double t0=usecond();
@@ -207,20 +254,19 @@ public:
      DeltaH = evolve_hmc_step(Ucopy);
      // Metropolis-Hastings test
      bool accept = true;
      if (traj >= Params.StartTrajectory + Params.NoMetropolisUntil) {
      if (Params.MetropolisTest && traj >= Params.StartTrajectory + Params.NoMetropolisUntil) {
        accept = metropolis_test(DeltaH);
      } else {
        std::cout << GridLogMessage << "Skipping Metropolis test" << std::endl;
        std::cout << GridLogHMC << "Skipping Metropolis test" << std::endl;
      }

      if (accept)
        Ucur = Ucopy;

      double t1=usecond();
      std::cout << GridLogMessage << "Total time for trajectory (s): " << (t1-t0)/1e6 << std::endl;
      std::cout << GridLogHMC << "Total time for trajectory (s): " << (t1-t0)/1e6 << std::endl;

      TheIntegrator.print_timer();

      for (int obs = 0; obs < Observables.size(); obs++) {
        std::cout << GridLogDebug << "Observables # " << obs << std::endl;
@@ -228,7 +274,7 @@ public:
        std::cout << GridLogDebug << "Observables pointer " << Observables[obs] << std::endl;
        Observables[obs]->TrajectoryComplete(traj + 1, Ucur, sRNG, pRNG);
      }
      std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::::" << std::endl;
      std::cout << GridLogHMC << ":::::::::::::::::::::::::::::::::::::::::::" << std::endl;
    }
  }
@@ -72,6 +72,8 @@ class HMCResourceManager {
  typedef HMCModuleBase< BaseHmcCheckpointer<ImplementationPolicy> > CheckpointerBaseModule;
  typedef HMCModuleBase< HmcObservable<typename ImplementationPolicy::Field> > ObservableBaseModule;
  typedef ActionModuleBase< Action<typename ImplementationPolicy::Field>, GridModule > ActionBaseModule;
  typedef typename ImplementationPolicy::Field MomentaField;
  typedef typename ImplementationPolicy::Field Field;

  // Named storage for grid pairs (std + red-black)
  std::unordered_map<std::string, GridModule> Grids;
@@ -80,6 +82,9 @@ class HMCResourceManager {
  // SmearingModule<ImplementationPolicy> Smearing;
  std::unique_ptr<CheckpointerBaseModule> CP;

  // Momentum filter
  std::unique_ptr<MomentumFilterBase<typename ImplementationPolicy::Field> > Filter;

  // A vector of HmcObservable modules
  std::vector<std::unique_ptr<ObservableBaseModule> > ObservablesList;
@@ -90,6 +95,7 @@ class HMCResourceManager {

  bool have_RNG;
  bool have_CheckPointer;
  bool have_Filter;

  // NOTE: operator << is not overloaded for std::vector<string>
  // so this function is necessary
@@ -101,7 +107,7 @@ class HMCResourceManager {

public:
  HMCResourceManager() : have_RNG(false), have_CheckPointer(false) {}
  HMCResourceManager() : have_RNG(false), have_CheckPointer(false), have_Filter(false) {}

  template <class ReaderClass, class vector_type = vComplex >
  void initialize(ReaderClass &Read){
@@ -129,6 +135,7 @@ public:
    RNGModuleParameters RNGpar(Read);
    SetRNGSeeds(RNGpar);

    // Observables
    auto &ObsFactory = HMC_ObservablesModuleFactory<observable_string, typename ImplementationPolicy::Field, ReaderClass>::getInstance();
    Read.push(observable_string);// here must check if existing...
@@ -208,6 +215,16 @@ public:
    AddGrid(s, Mod);
  }

  void SetMomentumFilter( MomentumFilterBase<typename ImplementationPolicy::Field> * MomFilter) {
    assert(have_Filter==false);
    Filter = std::unique_ptr<MomentumFilterBase<typename ImplementationPolicy::Field> >(MomFilter);
    have_Filter = true;
  }
  MomentumFilterBase<typename ImplementationPolicy::Field> *GetMomentumFilter(void) {
    if ( !have_Filter)
      SetMomentumFilter(new MomentumFilterNone<typename ImplementationPolicy::Field>());
    return Filter.get();
  }

  GridCartesian* GetCartesian(std::string s = "") {
    if (s.empty()) s = Grids.begin()->first;
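A hedged sketch of registering a filter from a user-level HMC driver before Run(); the `TheHMC` object, the field typedef and the block size are illustrative. If nothing is registered, GetMomentumFilter() falls back to MomentumFilterNone:

  typedef LatticeGaugeField Field;        // assumed: ImplementationPolicy::Field for a gauge HMC
  Coordinate Block({16,16,16,16});        // hypothetical DDHMC block
  TheHMC.Resources.SetMomentumFilter(new DDHMCFilter<Field>(Block));
  // ownership passes to the resource manager via std::unique_ptr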
@@ -226,6 +243,9 @@ public:
  //////////////////////////////////////////////////////
  // Random number generators
  //////////////////////////////////////////////////////

  //Return true if the RNG objects have been instantiated
  bool haveRNGs() const{ return have_RNG; }

  void AddRNGs(std::string s = "") {
    // Couple the RNGs to the GridModule tagged by s
@@ -33,7 +33,6 @@ directory
#define INTEGRATOR_INCLUDED

#include <memory>
#include "MomentumFilter.h"

NAMESPACE_BEGIN(Grid);
@@ -67,6 +66,7 @@ public:
template <class FieldImplementation, class SmearingPolicy, class RepresentationPolicy>
class Integrator {
protected:

  typedef typename FieldImplementation::Field MomentaField;  //for readability
  typedef typename FieldImplementation::Field Field;
@@ -119,36 +119,58 @@ protected:
    }
  } update_P_hireps{};

  void update_P(MomentaField& Mom, Field& U, int level, double ep) {
    // input U actually not used in the fundamental case
    // Fundamental updates, include smearing

    for (int a = 0; a < as[level].actions.size(); ++a) {

      double start_full = usecond();
      Field force(U.Grid());
      conformable(U.Grid(), Mom.Grid());

      Field& Us = Smearer.get_U(as[level].actions.at(a)->is_smeared);
      double start_force = usecond();
      as[level].actions.at(a)->deriv_timer_start();
      as[level].actions.at(a)->deriv(Us, force);  // deriv should NOT include Ta
      as[level].actions.at(a)->deriv_timer_stop();

      std::cout << GridLogIntegrator << "Smearing (on/off): " << as[level].actions.at(a)->is_smeared << std::endl;
      auto name = as[level].actions.at(a)->action_name();
      if (as[level].actions.at(a)->is_smeared) Smearer.smeared_force(force);

      force = FieldImplementation::projectForce(force); // Ta for gauge fields
      double end_force = usecond();
      Real force_abs = std::sqrt(norm2(force)/U.Grid()->gSites());
      std::cout << GridLogIntegrator << "["<<level<<"]["<<a<<"] Force average: " << force_abs << std::endl;

      MomFilter->applyFilter(force);
      std::cout << GridLogIntegrator << " update_P : Level [" << level <<"]["<<a <<"] "<<name<< std::endl;
      DumpSliceNorm("force ",force,Nd-1);

      Real force_abs   = std::sqrt(norm2(force)/U.Grid()->gSites()); //average per-site norm.  nb. norm2(latt) = \sum_x norm2(latt[x])
      Real impulse_abs = force_abs * ep * HMC_MOMENTUM_DENOMINATOR;

      Real force_max   = std::sqrt(maxLocalNorm2(force));
      Real impulse_max = force_max * ep * HMC_MOMENTUM_DENOMINATOR;

      as[level].actions.at(a)->deriv_log(force_abs,force_max);

      std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Force average: " << force_abs   <<" "<<name<<std::endl;
      std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Force max    : " << force_max   <<" "<<name<<std::endl;
      std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Fdt average  : " << impulse_abs <<" "<<name<<std::endl;
      std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Fdt max      : " << impulse_max <<" "<<name<<std::endl;

      Mom -= force * ep* HMC_MOMENTUM_DENOMINATOR;;
      double end_full = usecond();
      double time_full  = (end_full - start_full) / 1e3;
      double time_force = (end_force - start_force) / 1e3;
      std::cout << GridLogMessage << "["<<level<<"]["<<a<<"] P update elapsed time: " << time_full << " ms (force: " << time_force << " ms)"  << std::endl;

    }

    // Force from the other representations
    as[level].apply(update_P_hireps, Representations, Mom, U, ep);

    MomFilter->applyFilter(Mom);
  }

  void update_U(Field& U, double ep)
@@ -162,8 +184,12 @@ protected:

  void update_U(MomentaField& Mom, Field& U, double ep)
  {
    MomentaField MomFiltered(Mom.Grid());
    MomFiltered = Mom;
    MomFilter->applyFilter(MomFiltered);

    // exponential of Mom*U in the gauge fields case
    FieldImplementation::update_field(Mom, U, ep);
    FieldImplementation::update_field(MomFiltered, U, ep);

    // Update the smeared fields, can be implemented as observer
    Smearer.set_Field(U);
@@ -206,6 +232,66 @@ public:
  const MomentaField & getMomentum() const{ return P; }

  void reset_timer(void)
  {
    for (int level = 0; level < as.size(); ++level) {
      for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
        as[level].actions.at(actionID)->reset_timer();
      }
    }
  }
  void print_timer(void)
  {
    std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::" << std::endl;
    std::cout << GridLogMessage << " Refresh cumulative timings "<<std::endl;
    std::cout << GridLogMessage << "--------------------------- "<<std::endl;
    for (int level = 0; level < as.size(); ++level) {
      for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
        std::cout << GridLogMessage
                  << as[level].actions.at(actionID)->action_name()
                  <<"["<<level<<"]["<< actionID<<"] "
                  << as[level].actions.at(actionID)->refresh_us*1.0e-6<<" s"<< std::endl;
      }
    }
    std::cout << GridLogMessage << "--------------------------- "<<std::endl;
    std::cout << GridLogMessage << " Action cumulative timings "<<std::endl;
    std::cout << GridLogMessage << "--------------------------- "<<std::endl;
    for (int level = 0; level < as.size(); ++level) {
      for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
        std::cout << GridLogMessage
                  << as[level].actions.at(actionID)->action_name()
                  <<"["<<level<<"]["<< actionID<<"] "
                  << as[level].actions.at(actionID)->S_us*1.0e-6<<" s"<< std::endl;
      }
    }
    std::cout << GridLogMessage << "--------------------------- "<<std::endl;
    std::cout << GridLogMessage << " Force cumulative timings "<<std::endl;
    std::cout << GridLogMessage << "------------------------- "<<std::endl;
    for (int level = 0; level < as.size(); ++level) {
      for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
        std::cout << GridLogMessage
                  << as[level].actions.at(actionID)->action_name()
                  <<"["<<level<<"]["<< actionID<<"] "
                  << as[level].actions.at(actionID)->deriv_us*1.0e-6<<" s"<< std::endl;
      }
    }
    std::cout << GridLogMessage << "--------------------------- "<<std::endl;
    std::cout << GridLogMessage << " Force average size "<<std::endl;
    std::cout << GridLogMessage << "------------------------- "<<std::endl;
    for (int level = 0; level < as.size(); ++level) {
      for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
        std::cout << GridLogMessage
                  << as[level].actions.at(actionID)->action_name()
                  <<"["<<level<<"]["<< actionID<<"] : "
                  <<" force max " << as[level].actions.at(actionID)->deriv_max_average()
                  <<" norm "      << as[level].actions.at(actionID)->deriv_norm_average()
                  <<" calls "     << as[level].actions.at(actionID)->deriv_num
                  << std::endl;
      }
    }
    std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::"<< std::endl;
  }

  void print_parameters()
  {
    std::cout << GridLogMessage << "[Integrator] Name : "<< integrator_name() << std::endl;
@@ -224,7 +310,6 @@ public:
      }
    }
    std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::"<< std::endl;
  }

  void reverse_momenta()
@@ -267,15 +352,19 @@ public:
      for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
        // get gauge field from the SmearingPolicy and
        // based on the boolean is_smeared in actionID
        auto name = as[level].actions.at(actionID)->action_name();
        std::cout << GridLogMessage << "refresh [" << level << "][" << actionID << "] "<<name << std::endl;

        Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
        as[level].actions.at(actionID)->refresh_timer_start();
        as[level].actions.at(actionID)->refresh(Us, sRNG, pRNG);
        as[level].actions.at(actionID)->refresh_timer_stop();
      }

      // Refresh the higher representation actions
      as[level].apply(refresh_hireps, Representations, sRNG, pRNG);
    }

    MomFilter->applyFilter(P);
  }

  // to be used by the actionlevel class to iterate
@@ -310,7 +399,9 @@ public:
        // based on the boolean is_smeared in actionID
        Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
        std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] action eval " << std::endl;
        as[level].actions.at(actionID)->S_timer_start();
        Hterm = as[level].actions.at(actionID)->S(Us);
        as[level].actions.at(actionID)->S_timer_stop();
        std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] H = " << Hterm << std::endl;
        H += Hterm;
      }
@@ -52,6 +52,11 @@ public:
    return arg;
  }
};
class SimpleStencilParams{
public:
  Coordinate dirichlet;
  SimpleStencilParams() {};
};

NAMESPACE_END(Grid);
@@ -133,6 +133,8 @@ class CartesianStencilAccelerator {
  int _osites;
  StencilVector _directions;
  StencilVector _distances;
  StencilVector _comms_send;
  StencilVector _comms_recv;
  StencilVector _comm_buf_size;
  StencilVector _permute_type;
  StencilVector same_node;
@@ -226,6 +228,8 @@ public:
    void * recv_buf;
    Integer to_rank;
    Integer from_rank;
    Integer do_send;
    Integer do_recv;
    Integer bytes;
  };
  struct Merge {
@@ -240,7 +244,20 @@ public:
    cobj * mpi_p;
    Integer buffer_size;
  };

  struct CopyReceiveBuffer {
    void * from_p;
    void * to_p;
    Integer bytes;
  };
  struct CachedTransfer {
    Integer direction;
    Integer OrthogPlane;
    Integer DestProc;
    Integer bytes;
    Integer lane;
    Integer cb;
    void *recv_buf;
  };

protected:
  GridBase * _grid;
@@ -271,7 +288,8 @@ public:
  std::vector<Merge> MergersSHM;
  std::vector<Decompress> Decompressions;
  std::vector<Decompress> DecompressionsSHM;

  std::vector<CopyReceiveBuffer> CopyReceiveBuffers ;
  std::vector<CachedTransfer> CachedTransfers;
  ///////////////////////////////////////////////////////////
  // Unified Comms buffers for all directions
  ///////////////////////////////////////////////////////////
@@ -284,29 +302,6 @@ public:
  int u_comm_offset;
  int _unified_buffer_size;

  /////////////////////////////////////////
  // Timing info; ugly; possibly temporary
  /////////////////////////////////////////
  double commtime;
  double mpi3synctime;
  double mpi3synctime_g;
  double shmmergetime;
  double gathertime;
  double gathermtime;
  double halogtime;
  double mergetime;
  double decompresstime;
  double comms_bytes;
  double shm_bytes;
  double splicetime;
  double nosplicetime;
  double calls;
  std::vector<double> comm_bytes_thr;
  std::vector<double> shm_bytes_thr;
  std::vector<double> comm_time_thr;
  std::vector<double> comm_enter_thr;
  std::vector<double> comm_leave_thr;

  ////////////////////////////////////////
  // Stencil query
  ////////////////////////////////////////
@@ -333,11 +328,12 @@ public:
  //////////////////////////////////////////
  // Comms packet queue for asynch thread
  // Use OpenMP Tasks for cleaner ???
  // must be called *inside* parallel region
  //////////////////////////////////////////
  /*
  void CommunicateThreaded()
  {
#ifdef GRID_OMP
    // must be called in parallel region
    int mythread = omp_get_thread_num();
    int nthreads = CartesianCommunicator::nCommThreads;
#else
@@ -346,65 +342,29 @@ public:
#endif
    if (nthreads == -1) nthreads = 1;
    if (mythread < nthreads) {
      comm_enter_thr[mythread] = usecond();
      for (int i = mythread; i < Packets.size(); i += nthreads) {
        uint64_t bytes = _grid->StencilSendToRecvFrom(Packets[i].send_buf,
                                                      Packets[i].to_rank,
                                                      Packets[i].recv_buf,
                                                      Packets[i].from_rank,
                                                      Packets[i].bytes,i);
        comm_bytes_thr[mythread] += bytes;
        shm_bytes_thr[mythread]  += 2*Packets[i].bytes-bytes; // Send + Recv.

      }
      comm_leave_thr[mythread]= usecond();
      comm_time_thr[mythread] += comm_leave_thr[mythread] - comm_enter_thr[mythread];
    }
  }

  void CollateThreads(void)
  {
    int nthreads = CartesianCommunicator::nCommThreads;
    double first=0.0;
    double last =0.0;

    for(int t=0;t<nthreads;t++) {

      double t0 = comm_enter_thr[t];
      double t1 = comm_leave_thr[t];
      comms_bytes+=comm_bytes_thr[t];
      shm_bytes  +=shm_bytes_thr[t];

      comm_enter_thr[t] = 0.0;
      comm_leave_thr[t] = 0.0;
      comm_time_thr[t]  = 0.0;
      comm_bytes_thr[t]=0;
      shm_bytes_thr[t]=0;

      if ( first == 0.0 ) first = t0;                   // first is t0
      if ( (t0 > 0.0) && ( t0 < first ) ) first = t0;   // min time seen

      if ( t1 > last ) last = t1;                       // max time seen

    }
    commtime+= last-first;
  }
  */
  ////////////////////////////////////////////////////////////////////////
  // Non blocking send and receive. Necessarily parallel.
  ////////////////////////////////////////////////////////////////////////
  void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs)
  {
    reqs.resize(Packets.size());
    commtime-=usecond();
    for(int i=0;i<Packets.size();i++){
      uint64_t bytes=_grid->StencilSendToRecvFromBegin(reqs[i],
                                                       Packets[i].send_buf,
                                                       Packets[i].to_rank,
                                                       Packets[i].recv_buf,
                                                       Packets[i].from_rank,
                                                       Packets[i].bytes,i);
      comms_bytes+=bytes;
      shm_bytes  +=2*Packets[i].bytes-bytes;
      _grid->StencilSendToRecvFromBegin(reqs[i],
                                        Packets[i].send_buf,
                                        Packets[i].to_rank,Packets[i].do_send,
                                        Packets[i].recv_buf,
                                        Packets[i].from_rank,Packets[i].do_recv,
                                        Packets[i].bytes,i);
    }
  }
@ -413,7 +373,6 @@ public:
|
||||
for(int i=0;i<Packets.size();i++){
|
||||
_grid->StencilSendToRecvFromComplete(reqs[i],i);
|
||||
}
|
||||
commtime+=usecond();
|
||||
}
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// Blocking send and receive. Either sequential or parallel.
|
||||
@ -421,28 +380,27 @@ public:
|
||||
void Communicate(void)
|
||||
{
|
||||
if ( CartesianCommunicator::CommunicatorPolicy == CartesianCommunicator::CommunicatorPolicySequential ){
|
||||
thread_region {
|
||||
// must be called in parallel region
|
||||
int mythread = thread_num();
|
||||
int maxthreads= thread_max();
|
||||
int nthreads = CartesianCommunicator::nCommThreads;
|
||||
assert(nthreads <= maxthreads);
|
||||
if (nthreads == -1) nthreads = 1;
|
||||
if (mythread < nthreads) {
|
||||
for (int i = mythread; i < Packets.size(); i += nthreads) {
|
||||
double start = usecond();
|
||||
uint64_t bytes= _grid->StencilSendToRecvFrom(Packets[i].send_buf,
|
||||
Packets[i].to_rank,
|
||||
Packets[i].recv_buf,
|
||||
Packets[i].from_rank,
|
||||
Packets[i].bytes,i);
|
||||
comm_bytes_thr[mythread] += bytes;
|
||||
shm_bytes_thr[mythread] += Packets[i].bytes - bytes;
|
||||
comm_time_thr[mythread] += usecond() - start;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else { // Concurrent and non-threaded asynch calls to MPI
|
||||
/////////////////////////////////////////////////////////
|
||||
// several way threaded on different communicators.
|
||||
// Cannot combine with Dirichlet operators
|
||||
// This scheme is needed on Intel Omnipath for best performance
|
||||
// Deprecate once there are very few omnipath clusters
|
||||
/////////////////////////////////////////////////////////
|
||||
int nthreads = CartesianCommunicator::nCommThreads;
|
||||
int old = GridThread::GetThreads();
|
||||
GridThread::SetThreads(nthreads);
|
||||
thread_for(i,Packets.size(),{
|
||||
_grid->StencilSendToRecvFrom(Packets[i].send_buf,
|
||||
Packets[i].to_rank,Packets[i].do_send,
|
||||
Packets[i].recv_buf,
|
||||
Packets[i].from_rank,Packets[i].do_recv,
|
||||
Packets[i].bytes,i);
|
||||
});
|
||||
GridThread::SetThreads(old);
|
||||
} else {
|
||||
/////////////////////////////////////////////////////////
|
||||
// Concurrent and non-threaded asynch calls to MPI
|
||||
/////////////////////////////////////////////////////////
|
||||
std::vector<std::vector<CommsRequest_t> > reqs;
|
||||
this->CommunicateBegin(reqs);
|
||||
this->CommunicateComplete(reqs);
|
||||
@ -484,31 +442,23 @@ public:
|
||||
sshift[1] = _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,Odd);
|
||||
if ( sshift[0] == sshift[1] ) {
|
||||
if (splice_dim) {
|
||||
splicetime-=usecond();
|
||||
auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx);
|
||||
auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx,point);
|
||||
is_same_node = is_same_node && tmp;
|
||||
splicetime+=usecond();
|
||||
} else {
|
||||
nosplicetime-=usecond();
|
||||
auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx);
|
||||
auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx,point);
|
||||
is_same_node = is_same_node && tmp;
|
||||
nosplicetime+=usecond();
|
||||
}
|
||||
} else {
|
||||
if(splice_dim){
|
||||
splicetime-=usecond();
|
||||
// if checkerboard is unfavourable take two passes
|
||||
// both with block stride loop iteration
|
||||
auto tmp1 = GatherSimd(source,dimension,shift,0x1,compress,face_idx);
|
||||
auto tmp2 = GatherSimd(source,dimension,shift,0x2,compress,face_idx);
|
||||
auto tmp1 = GatherSimd(source,dimension,shift,0x1,compress,face_idx,point);
|
||||
auto tmp2 = GatherSimd(source,dimension,shift,0x2,compress,face_idx,point);
|
||||
is_same_node = is_same_node && tmp1 && tmp2;
|
||||
splicetime+=usecond();
|
||||
} else {
|
||||
nosplicetime-=usecond();
|
||||
auto tmp1 = Gather(source,dimension,shift,0x1,compress,face_idx);
|
||||
auto tmp2 = Gather(source,dimension,shift,0x2,compress,face_idx);
|
||||
auto tmp1 = Gather(source,dimension,shift,0x1,compress,face_idx,point);
|
||||
auto tmp2 = Gather(source,dimension,shift,0x2,compress,face_idx,point);
|
||||
is_same_node = is_same_node && tmp1 && tmp2;
|
||||
nosplicetime+=usecond();
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -518,13 +468,10 @@ public:
|
||||
template<class compressor>
|
||||
void HaloGather(const Lattice<vobj> &source,compressor &compress)
|
||||
{
|
||||
mpi3synctime_g-=usecond();
|
||||
_grid->StencilBarrier();// Synch shared memory on a single node
|
||||
mpi3synctime_g+=usecond();
|
||||
|
||||
// conformable(source.Grid(),_grid);
|
||||
assert(source.Grid()==_grid);
|
||||
halogtime-=usecond();
|
||||
|
||||
u_comm_offset=0;
|
||||
|
||||
@ -538,7 +485,6 @@ public:
|
||||
assert(u_comm_offset==_unified_buffer_size);
|
||||
|
||||
accelerator_barrier();
|
||||
halogtime+=usecond();
|
||||
}
|
||||
|
||||
/////////////////////////
|
||||
@ -551,14 +497,70 @@ public:
Mergers.resize(0);
MergersSHM.resize(0);
Packets.resize(0);
calls++;
CopyReceiveBuffers.resize(0);
CachedTransfers.resize(0);
}
void AddPacket(void *xmit,void * rcv, Integer to,Integer from,Integer bytes){
void AddCopy(void *from,void * to, Integer bytes)
{
CopyReceiveBuffer obj;
obj.from_p = from;
obj.to_p = to;
obj.bytes= bytes;
CopyReceiveBuffers.push_back(obj);
}
void CommsCopy()
{
// These are device resident MPI buffers.
for(int i=0;i<CopyReceiveBuffers.size();i++){
cobj *from=(cobj *)CopyReceiveBuffers[i].from_p;
cobj *to =(cobj *)CopyReceiveBuffers[i].to_p;
Integer words = CopyReceiveBuffers[i].bytes/sizeof(cobj);

accelerator_forNB(j, words, cobj::Nsimd(), {
coalescedWrite(to[j] ,coalescedRead(from [j]));
});
}
}

Integer CheckForDuplicate(Integer direction, Integer OrthogPlane, Integer DestProc, void *recv_buf,Integer lane,Integer bytes,Integer cb)
{
CachedTransfer obj;
obj.direction = direction;
obj.OrthogPlane = OrthogPlane;
obj.DestProc = DestProc;
obj.recv_buf = recv_buf;
obj.lane = lane;
obj.bytes = bytes;
obj.cb = cb;

for(int i=0;i<CachedTransfers.size();i++){
if ( (CachedTransfers[i].direction ==direction)
&&(CachedTransfers[i].OrthogPlane==OrthogPlane)
&&(CachedTransfers[i].DestProc ==DestProc)
&&(CachedTransfers[i].bytes ==bytes)
&&(CachedTransfers[i].lane ==lane)
&&(CachedTransfers[i].cb ==cb)
){

AddCopy(CachedTransfers[i].recv_buf,recv_buf,bytes);
return 1;
}
}

CachedTransfers.push_back(obj);
return 0;
}
void AddPacket(void *xmit,void * rcv,
Integer to, Integer do_send,
Integer from, Integer do_recv,
Integer bytes){
Packet p;
p.send_buf = xmit;
p.recv_buf = rcv;
p.to_rank = to;
p.from_rank= from;
p.do_send = do_send;
p.do_recv = do_recv;
p.bytes = bytes;
Packets.push_back(p);
}
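CheckForDuplicate is what lets two checkerboard passes over the same face share one MPI message: the repeat request becomes a device-side copy via AddCopy, and CommsCopy replays those copies once the real receives are in place. The calling pattern, as used by Gather and GatherSimd further down in this diff, looks like this:

// Sketch assembled from the Gather code below: only a first occurrence of a
// face becomes an MPI packet, later occurrences reuse the received buffer.
int duplicate = CheckForDuplicate(dimension,sx,comm_proc,(void *)&recv_buf[comm_off],0,bytes,cbmask);
if ( !duplicate ) {
  AddPacket((void *)&send_buf[comm_off],(void *)&recv_buf[comm_off],
            xmit_to_rank, comms_send,
            recv_from_rank, comms_recv,
            bytes);
}
// CommsMerge(decompress) later calls CommsCopy(), so the duplicated face is
// filled by an accelerator copy instead of a second message.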
@ -578,22 +580,17 @@ public:
|
||||
mv.push_back(m);
|
||||
}
|
||||
template<class decompressor> void CommsMerge(decompressor decompress) {
|
||||
CommsCopy();
|
||||
CommsMerge(decompress,Mergers,Decompressions);
|
||||
}
|
||||
template<class decompressor> void CommsMergeSHM(decompressor decompress) {
|
||||
mpi3synctime-=usecond();
|
||||
_grid->StencilBarrier();// Synch shared memory on a single node
|
||||
mpi3synctime+=usecond();
|
||||
shmmergetime-=usecond();
|
||||
CommsMerge(decompress,MergersSHM,DecompressionsSHM);
|
||||
shmmergetime+=usecond();
|
||||
}
|
||||
|
||||
template<class decompressor>
|
||||
void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd) {
|
||||
|
||||
|
||||
mergetime-=usecond();
|
||||
void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd)
|
||||
{
|
||||
for(int i=0;i<mm.size();i++){
|
||||
auto mp = &mm[i].mpointer[0];
|
||||
auto vp0= &mm[i].vpointers[0][0];
|
||||
@ -603,9 +600,7 @@ public:
|
||||
decompress.Exchange(mp,vp0,vp1,type,o);
|
||||
});
|
||||
}
|
||||
mergetime+=usecond();
|
||||
|
||||
decompresstime-=usecond();
|
||||
for(int i=0;i<dd.size();i++){
|
||||
auto kp = dd[i].kernel_p;
|
||||
auto mp = dd[i].mpi_p;
|
||||
@ -613,7 +608,6 @@ public:
|
||||
decompress.Decompress(kp,mp,o);
|
||||
});
|
||||
}
|
||||
decompresstime+=usecond();
|
||||
}
|
||||
////////////////////////////////////////
|
||||
// Set up routines
|
||||
@ -650,18 +644,54 @@ public:
|
}
}
}

/// Introduce a block structure and switch off comms on boundaries
void DirichletBlock(const Coordinate &dirichlet_block)
{
for(int ii=0;ii<this->_npoints;ii++){
int dimension = this->_directions[ii];
int displacement = this->_distances[ii];
int gd = _grid->_gdimensions[dimension];
int fd = _grid->_fdimensions[dimension];
int pd = _grid->_processors [dimension];
int pc = _grid->_processor_coor[dimension];
int ld = fd/pd;
///////////////////////////////////////////
// Figure out dirichlet send and receive
// on this leg of stencil.
///////////////////////////////////////////
int comm_dim = _grid->_processors[dimension] >1 ;
int block = dirichlet_block[dimension];
this->_comms_send[ii] = comm_dim;
this->_comms_recv[ii] = comm_dim;
if ( block && comm_dim ) {
assert(abs(displacement) < ld );

if( displacement > 0 ) {
// High side, low side
// | <--B--->|
// |   |     |
// noR
// noS
if ( ( (ld*(pc+1) ) % block ) == 0 ) this->_comms_recv[ii] = 0;
if ( ( (ld*pc ) % block ) == 0 ) this->_comms_send[ii] = 0;
} else {
// High side, low side
// | <--B--->|
// |   |     |
// noS
// noR
if ( ( (ld*(pc+1) ) % block ) == 0 ) this->_comms_send[ii] = 0;
if ( ( (ld*pc ) % block ) == 0 ) this->_comms_recv[ii] = 0;
}
}
}
}
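As a reading aid, the block test above can be restated as a standalone sketch; comm_dim is assumed true here, i.e. the dimension really is split over processors, and the helper is illustrative rather than part of the class:

// Sketch only: the per-leg Dirichlet decision, restated outside the stencil.
// ld = local extent, pc = processor coordinate, block = Dirichlet block size.
inline void DirichletLeg(int ld,int pc,int block,int displacement,int &comms_send,int &comms_recv)
{
  comms_send = 1; comms_recv = 1;
  if ( !block ) return;                       // block 0 leaves comms on
  if ( displacement > 0 ) {
    if ( ( (ld*(pc+1)) % block ) == 0 ) comms_recv = 0;
    if ( ( (ld*pc    ) % block ) == 0 ) comms_send = 0;
  } else {
    if ( ( (ld*(pc+1)) % block ) == 0 ) comms_send = 0;
    if ( ( (ld*pc    ) % block ) == 0 ) comms_recv = 0;
  }
}
// e.g. ld=16, block=32, displacement +1: even pc drops its send and keeps its
// receive, odd pc keeps its send and drops its receive, so only transfers that
// stay inside a block survive; ld=16, block=16 switches both off on every rank.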
||||
CartesianStencil(GridBase *grid,
|
||||
int npoints,
|
||||
int checkerboard,
|
||||
const std::vector<int> &directions,
|
||||
const std::vector<int> &distances,
|
||||
Parameters p)
|
||||
: shm_bytes_thr(npoints),
|
||||
comm_bytes_thr(npoints),
|
||||
comm_enter_thr(npoints),
|
||||
comm_leave_thr(npoints),
|
||||
comm_time_thr(npoints)
|
||||
{
|
||||
face_table_computed=0;
|
||||
_grid = grid;
|
||||
@ -675,8 +705,12 @@ public:
|
||||
this->_simd_layout = _grid->_simd_layout; // copy simd_layout to give access to Accelerator Kernels
|
||||
this->_directions = StencilVector(directions);
|
||||
this->_distances = StencilVector(distances);
|
||||
this->_comms_send.resize(npoints);
|
||||
this->_comms_recv.resize(npoints);
|
||||
this->same_node.resize(npoints);
|
||||
|
||||
if ( p.dirichlet.size() ) DirichletBlock(p.dirichlet); // comms send/recv set up
|
||||
|
||||
_unified_buffer_size=0;
|
||||
surface_list.resize(0);
|
||||
|
||||
@ -693,15 +727,16 @@ public:
|
||||
int displacement = distances[i];
|
||||
int shift = displacement;
|
||||
|
||||
int gd = _grid->_gdimensions[dimension];
|
||||
int fd = _grid->_fdimensions[dimension];
|
||||
int pd = _grid->_processors [dimension];
|
||||
int ld = gd/pd;
|
||||
int rd = _grid->_rdimensions[dimension];
|
||||
int pc = _grid->_processor_coor[dimension];
|
||||
this->_permute_type[point]=_grid->PermuteType(dimension);
|
||||
|
||||
this->_checkerboard = checkerboard;
|
||||
|
||||
//////////////////////////
|
||||
// the permute type
|
||||
//////////////////////////
|
||||
int simd_layout = _grid->_simd_layout[dimension];
|
||||
int comm_dim = _grid->_processors[dimension] >1 ;
|
||||
int splice_dim = _grid->_simd_layout[dimension]>1 && (comm_dim);
|
||||
@ -710,7 +745,6 @@ public:
|
||||
assert ( (rotate_dim && comm_dim) == false) ; // Do not think spread out is supported
|
||||
|
||||
int sshift[2];
|
||||
|
||||
//////////////////////////
|
||||
// Underlying approach. For each local site build
|
||||
// up a table containing the npoint "neighbours" and whether they
|
||||
@ -811,6 +845,7 @@ public:
|
||||
GridBase *grid=_grid;
|
||||
const int Nsimd = grid->Nsimd();
|
||||
|
||||
int comms_recv = this->_comms_recv[point];
|
||||
int fd = _grid->_fdimensions[dimension];
|
||||
int ld = _grid->_ldimensions[dimension];
|
||||
int rd = _grid->_rdimensions[dimension];
|
||||
@ -867,25 +902,32 @@ public:
|
||||
if ( (shiftpm== 1) && (sx<x) && (grid->_processor_coor[dimension]==grid->_processors[dimension]-1) ) {
|
||||
wraparound = 1;
|
||||
}
|
||||
if (!offnode) {
|
||||
|
||||
// Wrap locally dirichlet support case OR node local
|
||||
if ( offnode==0 ) {
|
||||
|
||||
int permute_slice=0;
|
||||
CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound);
|
||||
|
||||
|
||||
} else {
|
||||
|
||||
if ( comms_recv==0 ) {
|
||||
|
||||
int permute_slice=1;
|
||||
CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound);
|
||||
|
||||
} else {
|
||||
|
||||
ScatterPlane(point,dimension,x,cbmask,_unified_buffer_size,wraparound); // permute/extract/merge is done in comms phase
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if ( offnode ) {
|
||||
int words = buffer_size;
|
||||
if (cbmask != 0x3) words=words>>1;
|
||||
|
||||
// int rank = grid->_processor;
|
||||
// int recv_from_rank;
|
||||
// int xmit_to_rank;
|
||||
|
||||
int unified_buffer_offset = _unified_buffer_size;
|
||||
_unified_buffer_size += words;
|
||||
|
||||
ScatterPlane(point,dimension,x,cbmask,unified_buffer_offset,wraparound); // permute/extract/merge is done in comms phase
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -984,11 +1026,14 @@ public:
|
||||
}
|
||||
|
||||
template<class compressor>
|
||||
int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx)
|
||||
int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx, int point)
|
||||
{
|
||||
typedef typename cobj::vector_type vector_type;
|
||||
typedef typename cobj::scalar_type scalar_type;
|
||||
|
||||
int comms_send = this->_comms_send[point] ;
|
||||
int comms_recv = this->_comms_recv[point] ;
|
||||
|
||||
assert(rhs.Grid()==_grid);
|
||||
// conformable(_grid,rhs.Grid());
|
||||
|
||||
@ -1011,78 +1056,93 @@ public:
|
||||
|
||||
int sx = (x+sshift)%rd;
|
||||
int comm_proc = ((x+sshift)/rd)%pd;
|
||||
|
||||
|
||||
if (comm_proc) {
|
||||
|
||||
|
||||
int words = buffer_size;
|
||||
if (cbmask != 0x3) words=words>>1;
|
||||
|
||||
int bytes = words * compress.CommDatumSize();
|
||||
|
||||
int so = sx*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
|
||||
if ( !face_table_computed ) {
|
||||
face_table.resize(face_idx+1);
|
||||
std::vector<std::pair<int,int> > face_table_host ;
|
||||
Gather_plane_table_compute ((GridBase *)_grid,dimension,sx,cbmask,u_comm_offset,face_table_host);
|
||||
face_table[face_idx].resize(face_table_host.size());
|
||||
acceleratorCopyToDevice(&face_table_host[0],
|
||||
&face_table[face_idx][0],
|
||||
face_table[face_idx].size()*sizeof(face_table_host[0]));
|
||||
}
|
||||
int comm_off = u_comm_offset;
|
||||
|
||||
// int rank = _grid->_processor;
|
||||
int recv_from_rank;
|
||||
int xmit_to_rank;
|
||||
cobj *recv_buf;
|
||||
cobj *send_buf;
|
||||
_grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);
|
||||
|
||||
assert (xmit_to_rank != _grid->ThisRank());
|
||||
assert (recv_from_rank != _grid->ThisRank());
|
||||
|
||||
cobj *recv_buf;
|
||||
if ( compress.DecompressionStep() ) {
|
||||
recv_buf=u_simd_recv_buf[0];
|
||||
} else {
|
||||
recv_buf=this->u_recv_buf_p;
|
||||
if( comms_send ) {
|
||||
|
||||
if ( !face_table_computed ) {
|
||||
face_table.resize(face_idx+1);
|
||||
std::vector<std::pair<int,int> > face_table_host ;
|
||||
Gather_plane_table_compute ((GridBase *)_grid,dimension,sx,cbmask,comm_off,face_table_host);
|
||||
face_table[face_idx].resize(face_table_host.size());
|
||||
acceleratorCopyToDevice(&face_table_host[0],
|
||||
&face_table[face_idx][0],
|
||||
face_table[face_idx].size()*sizeof(face_table_host[0]));
|
||||
}
|
||||
|
||||
|
||||
if ( compress.DecompressionStep() ) {
|
||||
recv_buf=u_simd_recv_buf[0];
|
||||
} else {
|
||||
recv_buf=this->u_recv_buf_p;
|
||||
}
|
||||
|
||||
send_buf = this->u_send_buf_p; // Gather locally, must send
|
||||
|
||||
////////////////////////////////////////////////////////
|
||||
// Gather locally
|
||||
////////////////////////////////////////////////////////
|
||||
assert(send_buf!=NULL);
|
||||
|
||||
Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,comm_off,so);
|
||||
}
|
||||
|
||||
cobj *send_buf;
|
||||
send_buf = this->u_send_buf_p; // Gather locally, must send
|
||||
int duplicate = CheckForDuplicate(dimension,sx,comm_proc,(void *)&recv_buf[comm_off],0,bytes,cbmask);
|
||||
if ( (!duplicate) ) { // Force comms for now
|
||||
|
||||
////////////////////////////////////////////////////////
|
||||
// Gather locally
|
||||
////////////////////////////////////////////////////////
|
||||
gathertime-=usecond();
|
||||
assert(send_buf!=NULL);
|
||||
Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so); face_idx++;
|
||||
gathertime+=usecond();
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// Build a list of things to do after we synchronise GPUs
|
||||
// Start comms now???
|
||||
///////////////////////////////////////////////////////////
|
||||
AddPacket((void *)&send_buf[u_comm_offset],
|
||||
(void *)&recv_buf[u_comm_offset],
|
||||
xmit_to_rank,
|
||||
recv_from_rank,
|
||||
bytes);
|
||||
|
||||
if ( compress.DecompressionStep() ) {
|
||||
AddDecompress(&this->u_recv_buf_p[u_comm_offset],
|
||||
&recv_buf[u_comm_offset],
|
||||
///////////////////////////////////////////////////////////
|
||||
// Build a list of things to do after we synchronise GPUs
|
||||
// Start comms now???
|
||||
///////////////////////////////////////////////////////////
|
||||
AddPacket((void *)&send_buf[comm_off],
|
||||
(void *)&recv_buf[comm_off],
|
||||
xmit_to_rank, comms_send,
|
||||
recv_from_rank, comms_recv,
|
||||
bytes);
|
||||
}
|
||||
|
||||
if ( compress.DecompressionStep() && comms_recv ) {
|
||||
AddDecompress(&this->u_recv_buf_p[comm_off],
|
||||
&recv_buf[comm_off],
|
||||
words,Decompressions);
|
||||
}
|
||||
|
||||
u_comm_offset+=words;
|
||||
face_idx++;
|
||||
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
template<class compressor>
|
||||
int GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx)
|
||||
int GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx,int point)
|
||||
{
|
||||
const int Nsimd = _grid->Nsimd();
|
||||
|
||||
const int maxl =2;// max layout in a direction
|
||||
|
||||
int comms_send = this->_comms_send[point] ;
|
||||
int comms_recv = this->_comms_recv[point] ;
|
||||
|
||||
int fd = _grid->_fdimensions[dimension];
|
||||
int rd = _grid->_rdimensions[dimension];
|
||||
int ld = _grid->_ldimensions[dimension];
|
||||
@ -1097,7 +1157,6 @@ public:
|
||||
|
||||
|
||||
int permute_type=_grid->PermuteType(dimension);
|
||||
// std::cout << "SimdNew permute type "<<permute_type<<std::endl;
|
||||
|
||||
///////////////////////////////////////////////
|
||||
// Simd direction uses an extract/merge pair
|
||||
@ -1131,8 +1190,9 @@ public:
|
||||
|
||||
if ( any_offnode ) {
|
||||
|
||||
int comm_off = u_comm_offset;
|
||||
for(int i=0;i<maxl;i++){
|
||||
spointers[i] = (cobj *) &u_simd_send_buf[i][u_comm_offset];
|
||||
spointers[i] = (cobj *) &u_simd_send_buf[i][comm_off];
|
||||
}
|
||||
|
||||
int sx = (x+sshift)%rd;
|
||||
@ -1141,18 +1201,17 @@ public:
|
||||
face_table.resize(face_idx+1);
|
||||
std::vector<std::pair<int,int> > face_table_host ;
|
||||
|
||||
Gather_plane_table_compute ((GridBase *)_grid,dimension,sx,cbmask,u_comm_offset,face_table_host);
|
||||
Gather_plane_table_compute ((GridBase *)_grid,dimension,sx,cbmask,comm_off,face_table_host);
|
||||
face_table[face_idx].resize(face_table_host.size());
|
||||
acceleratorCopyToDevice(&face_table_host[0],
|
||||
&face_table[face_idx][0],
|
||||
face_table[face_idx].size()*sizeof(face_table_host[0]));
|
||||
}
|
||||
gathermtime-=usecond();
|
||||
|
||||
Gather_plane_exchange_table(face_table[face_idx],rhs,spointers,dimension,sx,cbmask,compress,permute_type);
|
||||
if ( comms_send )
|
||||
Gather_plane_exchange_table(face_table[face_idx],rhs,spointers,dimension,sx,cbmask,compress,permute_type);
|
||||
face_idx++;
|
||||
|
||||
gathermtime+=usecond();
|
||||
//spointers[0] -- low
|
||||
//spointers[1] -- high
|
||||
|
||||
@ -1169,8 +1228,8 @@ public:
|
||||
int nbr_plane = nbr_ic;
|
||||
assert (sx == nbr_ox);
|
||||
|
||||
auto rp = &u_simd_recv_buf[i ][u_comm_offset];
|
||||
auto sp = &u_simd_send_buf[nbr_plane][u_comm_offset];
|
||||
auto rp = &u_simd_recv_buf[i ][comm_off];
|
||||
auto sp = &u_simd_send_buf[nbr_plane][comm_off];
|
||||
|
||||
if(nbr_proc){
|
||||
|
||||
@ -1181,8 +1240,13 @@ public:
|
||||
|
||||
rpointers[i] = rp;
|
||||
|
||||
AddPacket((void *)sp,(void *)rp,xmit_to_rank,recv_from_rank,bytes);
|
||||
|
||||
int duplicate = CheckForDuplicate(dimension,sx,nbr_proc,(void *)rp,i,bytes,cbmask);
|
||||
if ( !duplicate ) {
|
||||
AddPacket((void *)sp,(void *)rp,
|
||||
xmit_to_rank,comms_send,
|
||||
recv_from_rank,comms_recv,
|
||||
bytes);
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
@ -1191,9 +1255,12 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
AddMerge(&this->u_recv_buf_p[u_comm_offset],rpointers,reduced_buffer_size,permute_type,Mergers);
|
||||
if ( comms_recv ) {
|
||||
AddMerge(&this->u_recv_buf_p[comm_off],rpointers,reduced_buffer_size,permute_type,Mergers);
|
||||
}
|
||||
|
||||
u_comm_offset +=buffer_size;
|
||||
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
@ -55,7 +55,7 @@ template<class vtype, int N> accelerator_inline iVector<vtype, N> Exponentiate(c
|
||||
|
||||
|
||||
// Specialisation: Cayley-Hamilton exponential for SU(3)
|
||||
#ifndef GRID_CUDA
|
||||
#ifndef GRID_ACCELERATED
|
||||
template<class vtype, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0>::type * =nullptr>
|
||||
accelerator_inline iMatrix<vtype,3> Exponentiate(const iMatrix<vtype,3> &arg, RealD alpha , Integer Nexp = DEFAULT_MAT_EXP )
|
||||
{
|
||||
|
@ -441,7 +441,7 @@ inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(bas
|
||||
|
||||
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) // Asynch
|
||||
{
|
||||
hipMemcpyAsync(to,from,bytes, hipMemcpyDeviceToDevice,copyStream);
|
||||
hipMemcpy(to,from,bytes, hipMemcpyDeviceToDevice);
|
||||
}
|
||||
inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream); };
|
||||
|
||||
@ -461,6 +461,8 @@ inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream);
|
||||
accelerator_for2dNB(iter1, num1, iter2, num2, nsimd, { __VA_ARGS__ } ); \
|
||||
accelerator_barrier(dummy);
|
||||
|
||||
#define GRID_ACCELERATED
|
||||
|
||||
#endif
|
||||
|
||||
//////////////////////////////////////////////
|
||||
|
232
HMC/Mobius2p1f_DD_RHMC.cc
Normal file
@ -0,0 +1,232 @@
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
Source file: ./tests/Test_hmc_EODWFRatio.cc
|
||||
|
||||
Copyright (C) 2015-2016
|
||||
|
||||
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
|
||||
Author: Guido Cossu <guido.cossu@ed.ac.uk>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
See the full license in the file "LICENSE" in the top level distribution
|
||||
directory
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
#include <Grid/Grid.h>
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
using namespace Grid;
|
||||
|
||||
Grid_init(&argc, &argv);
|
||||
int threads = GridThread::GetThreads();
|
||||
|
||||
// Typedefs to simplify notation
|
||||
typedef WilsonImplR FermionImplPolicy;
|
||||
typedef MobiusFermionR FermionAction;
|
||||
typedef typename FermionAction::FermionField FermionField;
|
||||
|
||||
typedef Grid::XmlReader Serialiser;
|
||||
|
||||
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
|
||||
IntegratorParameters MD;
|
||||
// typedef GenericHMCRunner<LeapFrog> HMCWrapper;
|
||||
// MD.name = std::string("Leap Frog");
|
||||
// typedef GenericHMCRunner<ForceGradient> HMCWrapper;
|
||||
// MD.name = std::string("Force Gradient");
|
||||
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
|
||||
MD.name = std::string("MinimumNorm2");
|
||||
MD.MDsteps = 4;
|
||||
MD.trajL = 1.0;
|
||||
|
||||
HMCparameters HMCparams;
|
||||
HMCparams.StartTrajectory = 8;
|
||||
HMCparams.Trajectories = 200;
|
||||
HMCparams.NoMetropolisUntil= 0;
|
||||
// "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
|
||||
// HMCparams.StartingType =std::string("ColdStart");
|
||||
HMCparams.StartingType =std::string("CheckpointStart");
|
||||
HMCparams.MD = MD;
|
||||
HMCWrapper TheHMC(HMCparams);
|
||||
|
||||
// Grid from the command line arguments --grid and --mpi
|
||||
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
|
||||
|
||||
CheckpointerParameters CPparams;
|
||||
CPparams.config_prefix = "ckpoint_EODWF_lat";
|
||||
CPparams.rng_prefix = "ckpoint_EODWF_rng";
|
||||
CPparams.saveInterval = 1;
|
||||
CPparams.format = "IEEE64BIG";
|
||||
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
|
||||
|
||||
RNGModuleParameters RNGpar;
|
||||
RNGpar.serial_seeds = "1 2 3 4 5";
|
||||
RNGpar.parallel_seeds = "6 7 8 9 10";
|
||||
TheHMC.Resources.SetRNGSeeds(RNGpar);
|
||||
|
||||
// Construct observables
|
||||
// here there is too much indirection
|
||||
typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
|
||||
TheHMC.Resources.AddObservable<PlaqObs>();
|
||||
//////////////////////////////////////////////
|
||||
|
||||
const int Ls = 16;
|
||||
Real beta = 2.13;
|
||||
Real light_mass = 0.01;
|
||||
Real strange_mass = 0.04;
|
||||
Real pv_mass = 1.0;
|
||||
RealD M5 = 1.8;
|
||||
RealD b = 1.0;
|
||||
RealD c = 0.0;
|
||||
|
||||
// FIXME:
|
||||
// Same in MC and MD
|
||||
// Need to mix precision too
|
||||
OneFlavourRationalParams OFRp;
|
||||
OFRp.lo = 4.0e-3;
|
||||
OFRp.hi = 30.0;
|
||||
OFRp.MaxIter = 10000;
|
||||
OFRp.tolerance= 1.0e-10;
|
||||
OFRp.degree = 16;
|
||||
OFRp.precision= 50;
|
||||
|
||||
std::vector<Real> hasenbusch({ 0.01, 0.04, 0.2 , pv_mass });
|
||||
std::vector<bool> dirichlet ({ true, true, true });
|
||||
|
||||
auto GridPtr = TheHMC.Resources.GetCartesian();
|
||||
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();
|
||||
|
||||
////////////////////////////////////////////////////////////////
// Domain decomposed
////////////////////////////////////////////////////////////////
Coordinate latt4 = GridPtr->GlobalDimensions();
Coordinate mpi = GridPtr->ProcessorGrid();
Coordinate shm;

GlobalSharedMemory::GetShmDims(mpi,shm);

Coordinate CommDim(Nd);
for(int d=0;d<Nd;d++) CommDim[d]= (mpi[d]/shm[d])>1 ? 1 : 0;

Coordinate Dirichlet(Nd+1,0);
Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0] * shm[0];
Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1] * shm[1];
Dirichlet[3] = CommDim[2]*latt4[2]/mpi[2] * shm[2];
Dirichlet[4] = CommDim[3]*latt4[3]/mpi[3] * shm[3];

Coordinate Block4(Nd);
Block4[0] = Dirichlet[1];
Block4[1] = Dirichlet[2];
Block4[2] = Dirichlet[3];
Block4[3] = Dirichlet[4];
TheHMC.Resources.SetMomentumFilter(new DDHMCFilter<WilsonImplR::Field>(Block4));
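// Worked example (illustrative numbers, not this job's actual run parameters):
// with latt4 = 32.32.32.256, mpi = 1.1.1.8 and shm = 1.1.1.2, only the t
// direction crosses a node boundary, so CommDim = (0,0,0,1),
// Dirichlet = (0,0,0,0,64) and Block4 = (0,0,0,64); the DDHMCFilter above is
// then built on 64-slice blocks in t, and the spatial directions are untouched.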
|
||||
//////////////////////////
|
||||
// Fermion Grid
|
||||
//////////////////////////
|
||||
auto FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtr);
|
||||
auto FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtr);
|
||||
|
||||
IwasakiGaugeActionR GaugeAction(beta);
|
||||
|
||||
// temporarily need a gauge field
|
||||
LatticeGaugeField U(GridPtr);
|
||||
|
||||
// These lines are unecessary if BC are all periodic
|
||||
std::vector<Complex> boundary = {1,1,1,-1};
|
||||
FermionAction::ImplParams Params(boundary);
|
||||
|
||||
double StoppingCondition = 1e-10;
|
||||
double MaxCGIterations = 30000;
|
||||
ConjugateGradient<FermionField> CG(StoppingCondition,MaxCGIterations);
|
||||
|
||||
////////////////////////////////////
|
||||
// Collect actions
|
||||
////////////////////////////////////
|
||||
ActionLevel<HMCWrapper::Field> Level1(1);
|
||||
ActionLevel<HMCWrapper::Field> Level2(2);
|
||||
ActionLevel<HMCWrapper::Field> Level3(8);
|
||||
|
||||
////////////////////////////////////
|
||||
// Strange action
|
||||
////////////////////////////////////
|
||||
FermionAction StrangeOp (U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,strange_mass,M5,b,c, Params);
|
||||
FermionAction StrangePauliVillarsOp(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,pv_mass, M5,b,c, Params);
|
||||
|
||||
OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> StrangePseudoFermion(StrangePauliVillarsOp,StrangeOp,OFRp);
|
||||
// Level1.push_back(&StrangePseudoFermion);
|
||||
|
||||
////////////////////////////////////
|
||||
// up down action
|
||||
////////////////////////////////////
|
||||
std::vector<Real> light_den;
|
||||
std::vector<Real> light_num;
|
||||
std::vector<int> dirichlet_den;
|
||||
std::vector<int> dirichlet_num;
|
||||
|
||||
int n_hasenbusch = hasenbusch.size();
|
||||
light_den.push_back(light_mass);
|
||||
dirichlet_den.push_back(0);
|
||||
for(int h=0;h<n_hasenbusch;h++){
|
||||
light_den.push_back(hasenbusch[h]);
|
||||
light_num.push_back(hasenbusch[h]);
|
||||
dirichlet_num.push_back(1);
|
||||
dirichlet_den.push_back(1);
|
||||
}
|
||||
light_num.push_back(pv_mass);
|
||||
dirichlet_num.push_back(0);
|
||||
|
||||
std::vector<FermionAction *> Numerators;
|
||||
std::vector<FermionAction *> Denominators;
|
||||
std::vector<TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy> *> Quotients;
|
||||
|
||||
for(int h=0;h<n_hasenbusch+1;h++){
|
||||
std::cout << GridLogMessage << " 2f quotient Action "<< light_num[h]<< " (" << dirichlet_num[h]
|
||||
<<") / " << light_den[h]<< " (" << dirichlet_den[h]<<")"<< std::endl;
|
||||
Numerators.push_back (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, Params));
|
||||
Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, Params));
|
||||
Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],CG,CG));
|
||||
if ( dirichlet_den[h]==1) Denominators[h]->DirichletBlock(Dirichlet);
|
||||
if ( dirichlet_num[h]==1) Numerators[h]->DirichletBlock(Dirichlet);
|
||||
}
|
||||
|
||||
int nquo=Quotients.size();
|
||||
Level1.push_back(Quotients[0]);
|
||||
Level1.push_back(Quotients[nquo-1]);
|
||||
for(int h=1;h<nquo-1;h++){
|
||||
Level2.push_back(Quotients[h]);
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
// Gauge action
|
||||
/////////////////////////////////////////////////////////////
|
||||
Level3.push_back(&GaugeAction);
|
||||
TheHMC.TheAction.push_back(Level1);
|
||||
TheHMC.TheAction.push_back(Level2);
|
||||
TheHMC.TheAction.push_back(Level3);
|
||||
std::cout << GridLogMessage << " Action complete "<< std::endl;
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
|
||||
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
|
||||
TheHMC.Run(); // no smearing
|
||||
|
||||
Grid_finalize();
|
||||
} // main
|
||||
|
||||
|
||||
|
@ -217,9 +217,9 @@ int main (int argc, char ** argv)
|
||||
dbytes+=
|
||||
Grid.StencilSendToRecvFromBegin(requests,
|
||||
(void *)&xbuf[mu][0],
|
||||
xmit_to_rank,
|
||||
xmit_to_rank,1,
|
||||
(void *)&rbuf[mu][0],
|
||||
recv_from_rank,
|
||||
recv_from_rank,1,
|
||||
bytes,mu);
|
||||
|
||||
comm_proc = mpi_layout[mu]-1;
|
||||
@ -228,9 +228,9 @@ int main (int argc, char ** argv)
|
||||
dbytes+=
|
||||
Grid.StencilSendToRecvFromBegin(requests,
|
||||
(void *)&xbuf[mu+4][0],
|
||||
xmit_to_rank,
|
||||
xmit_to_rank,1,
|
||||
(void *)&rbuf[mu+4][0],
|
||||
recv_from_rank,
|
||||
recv_from_rank,1,
|
||||
bytes,mu+4);
|
||||
|
||||
}
|
||||
@ -309,9 +309,9 @@ int main (int argc, char ** argv)
|
||||
dbytes+=
|
||||
Grid.StencilSendToRecvFromBegin(requests,
|
||||
(void *)&xbuf[mu][0],
|
||||
xmit_to_rank,
|
||||
xmit_to_rank,1,
|
||||
(void *)&rbuf[mu][0],
|
||||
recv_from_rank,
|
||||
recv_from_rank,1,
|
||||
bytes,mu);
|
||||
Grid.StencilSendToRecvFromComplete(requests,mu);
|
||||
requests.resize(0);
|
||||
@ -322,9 +322,9 @@ int main (int argc, char ** argv)
|
||||
dbytes+=
|
||||
Grid.StencilSendToRecvFromBegin(requests,
|
||||
(void *)&xbuf[mu+4][0],
|
||||
xmit_to_rank,
|
||||
xmit_to_rank,1,
|
||||
(void *)&rbuf[mu+4][0],
|
||||
recv_from_rank,
|
||||
recv_from_rank,1,
|
||||
bytes,mu+4);
|
||||
Grid.StencilSendToRecvFromComplete(requests,mu+4);
|
||||
requests.resize(0);
|
||||
@ -411,8 +411,8 @@ int main (int argc, char ** argv)
|
||||
Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
|
||||
}
|
||||
int tid = omp_get_thread_num();
|
||||
tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
|
||||
(void *)&rbuf[dir][0], recv_from_rank, bytes,tid);
|
||||
tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,1,
|
||||
(void *)&rbuf[dir][0], recv_from_rank,1, bytes,tid);
|
||||
|
||||
thread_critical { dbytes+=tbytes; }
|
||||
}
|
||||
|
@ -32,18 +32,18 @@
|
||||
using namespace std;
|
||||
using namespace Grid;
|
||||
|
||||
template<class d>
|
||||
struct scal {
|
||||
d internal;
|
||||
////////////////////////
|
||||
/// Move to domains ////
|
||||
////////////////////////
|
||||
|
||||
Gamma::Algebra Gmu [] = {
|
||||
Gamma::Algebra::GammaX,
|
||||
Gamma::Algebra::GammaY,
|
||||
Gamma::Algebra::GammaZ,
|
||||
Gamma::Algebra::GammaT
|
||||
};
|
||||
|
||||
Gamma::Algebra Gmu [] = {
|
||||
Gamma::Algebra::GammaX,
|
||||
Gamma::Algebra::GammaY,
|
||||
Gamma::Algebra::GammaZ,
|
||||
Gamma::Algebra::GammaT
|
||||
};
|
||||
|
||||
void Benchmark(int Ls, Coordinate Dirichlet);
|
||||
|
||||
int main (int argc, char ** argv)
|
||||
{
|
||||
@ -52,24 +52,82 @@ int main (int argc, char ** argv)
|
||||
|
||||
int threads = GridThread::GetThreads();
|
||||
|
||||
Coordinate latt4 = GridDefaultLatt();
|
||||
int Ls=16;
|
||||
for(int i=0;i<argc;i++)
|
||||
for(int i=0;i<argc;i++) {
|
||||
if(std::string(argv[i]) == "-Ls"){
|
||||
std::stringstream ss(argv[i+1]); ss >> Ls;
|
||||
}
|
||||
}
|
||||
|
||||
//////////////////
|
||||
// With comms
|
||||
//////////////////
|
||||
Coordinate Dirichlet(Nd+1,0);
|
||||
|
||||
std::cout << "\n\n\n\n\n\n" <<std::endl;
|
||||
std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
|
||||
std::cout << GridLogMessage<< " Testing with full communication " <<std::endl;
|
||||
std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
|
||||
|
||||
Benchmark(Ls,Dirichlet);
|
||||
|
||||
//////////////////
|
||||
// Domain decomposed
|
||||
//////////////////
|
||||
Coordinate latt4 = GridDefaultLatt();
|
||||
Coordinate mpi = GridDefaultMpi();
|
||||
Coordinate CommDim(Nd);
|
||||
Coordinate shm;
|
||||
GlobalSharedMemory::GetShmDims(mpi,shm);
|
||||
|
||||
|
||||
//////////////////////
|
||||
// Node level
|
||||
//////////////////////
|
||||
std::cout << "\n\n\n\n\n\n" <<std::endl;
|
||||
std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
|
||||
std::cout << GridLogMessage<< " Testing without internode communication " <<std::endl;
|
||||
std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
|
||||
|
||||
for(int d=0;d<Nd;d++) CommDim[d]= (mpi[d]/shm[d])>1 ? 1 : 0;
|
||||
Dirichlet[0] = 0;
|
||||
Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0] * shm[0];
|
||||
Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1] * shm[1];
|
||||
Dirichlet[3] = CommDim[2]*latt4[2]/mpi[2] * shm[2];
|
||||
Dirichlet[4] = CommDim[3]*latt4[3]/mpi[3] * shm[3];
|
||||
|
||||
Benchmark(Ls,Dirichlet);
|
||||
|
||||
std::cout << "\n\n\n\n\n\n" <<std::endl;
|
||||
|
||||
std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
|
||||
std::cout << GridLogMessage<< " Testing without intranode communication " <<std::endl;
|
||||
std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
|
||||
|
||||
for(int d=0;d<Nd;d++) CommDim[d]= mpi[d]>1 ? 1 : 0;
|
||||
Dirichlet[0] = 0;
|
||||
Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0];
|
||||
Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1];
|
||||
Dirichlet[3] = CommDim[2]*latt4[2]/mpi[2];
|
||||
Dirichlet[4] = CommDim[3]*latt4[3]/mpi[3];
|
||||
|
||||
Benchmark(Ls,Dirichlet);
|
||||
|
||||
Grid_finalize();
|
||||
exit(0);
|
||||
}
|
||||
void Benchmark(int Ls, Coordinate Dirichlet)
|
||||
{
|
||||
Coordinate latt4 = GridDefaultLatt();
|
||||
GridLogLayout();
|
||||
|
||||
long unsigned int single_site_flops = 8*Nc*(7+16*Nc);
|
||||
|
||||
|
||||
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexF::Nsimd()),GridDefaultMpi());
|
||||
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
|
||||
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
|
||||
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
|
||||
|
||||
std::cout << GridLogMessage << "Making s innermost grids"<<std::endl;
|
||||
GridCartesian * sUGrid = SpaceTimeGrid::makeFourDimDWFGrid(GridDefaultLatt(),GridDefaultMpi());
|
||||
GridRedBlackCartesian * sUrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(sUGrid);
|
||||
GridCartesian * sFGrid = SpaceTimeGrid::makeFiveDimDWFGrid(Ls,UGrid);
|
||||
@ -80,9 +138,9 @@ int main (int argc, char ** argv)
|
||||
|
||||
std::cout << GridLogMessage << "Initialising 4d RNG" << std::endl;
|
||||
GridParallelRNG RNG4(UGrid); RNG4.SeedUniqueString(std::string("The 4D RNG"));
|
||||
|
||||
std::cout << GridLogMessage << "Initialising 5d RNG" << std::endl;
|
||||
GridParallelRNG RNG5(FGrid); RNG5.SeedUniqueString(std::string("The 5D RNG"));
|
||||
std::cout << GridLogMessage << "Initialised RNGs" << std::endl;
|
||||
|
||||
LatticeFermionF src (FGrid); random(RNG5,src);
|
||||
#if 0
|
||||
@ -100,7 +158,6 @@ int main (int argc, char ** argv)
|
||||
src = src*N2;
|
||||
#endif
|
||||
|
||||
|
||||
LatticeFermionF result(FGrid); result=Zero();
|
||||
LatticeFermionF ref(FGrid); ref=Zero();
|
||||
LatticeFermionF tmp(FGrid);
|
||||
@ -108,29 +165,31 @@ int main (int argc, char ** argv)
|
||||
|
||||
std::cout << GridLogMessage << "Drawing gauge field" << std::endl;
|
||||
LatticeGaugeFieldF Umu(UGrid);
|
||||
LatticeGaugeFieldF UmuCopy(UGrid);
|
||||
SU<Nc>::HotConfiguration(RNG4,Umu);
|
||||
UmuCopy=Umu;
|
||||
std::cout << GridLogMessage << "Random gauge initialised " << std::endl;
|
||||
#if 0
|
||||
Umu=1.0;
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
LatticeColourMatrixF ttmp(UGrid);
|
||||
ttmp = PeekIndex<LorentzIndex>(Umu,mu);
|
||||
// if (mu !=2 ) ttmp = 0;
|
||||
// ttmp = ttmp* pow(10.0,mu);
|
||||
PokeIndex<LorentzIndex>(Umu,ttmp,mu);
|
||||
}
|
||||
std::cout << GridLogMessage << "Forced to diagonal " << std::endl;
|
||||
#endif
|
||||
|
||||
////////////////////////////////////
|
||||
// Apply BCs
|
||||
////////////////////////////////////
|
||||
Coordinate Block(4);
|
||||
for(int d=0;d<4;d++) Block[d]= Dirichlet[d+1];
|
||||
|
||||
std::cout << GridLogMessage << "Applying BCs for Dirichlet Block5 " << Dirichlet << std::endl;
|
||||
std::cout << GridLogMessage << "Applying BCs for Dirichlet Block4 " << Block << std::endl;
|
||||
|
||||
DirichletFilter<LatticeGaugeFieldF> Filter(Block);
|
||||
Filter.applyFilter(Umu);
|
||||
|
||||
////////////////////////////////////
|
||||
// Naive wilson implementation
|
||||
////////////////////////////////////
|
||||
// replicate across fifth dimension
|
||||
// LatticeGaugeFieldF Umu5d(FGrid);
|
||||
std::vector<LatticeColourMatrixF> U(4,UGrid);
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
|
||||
}
|
||||
|
||||
std::cout << GridLogMessage << "Setting up Cshift based reference " << std::endl;
|
||||
|
||||
if (1)
|
||||
@ -190,12 +249,15 @@ int main (int argc, char ** argv)
|
||||
if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3 WilsonKernels" <<std::endl;
|
||||
std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;
|
||||
|
||||
DomainWallFermionF Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
|
||||
DomainWallFermionF::ImplParams p;
|
||||
p.dirichlet=Dirichlet;
|
||||
DomainWallFermionF Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,p);
|
||||
Dw.ImportGauge(Umu);
|
||||
|
||||
int ncall =300;
|
||||
|
||||
if (1) {
|
||||
FGrid->Barrier();
|
||||
Dw.ZeroCounters();
|
||||
Dw.Dhop(src,result,0);
|
||||
std::cout<<GridLogMessage<<"Called warmup"<<std::endl;
|
||||
double t0=usecond();
|
||||
@ -220,29 +282,20 @@ int main (int argc, char ** argv)
|
||||
double data_mem = (volume * (2*Nd+1)*Nd*Nc + (volume/Ls) *2*Nd*Nc*Nc) * simdwidth / nsimd * ncall / (1024.*1024.*1024.);
|
||||
|
||||
std::cout<<GridLogMessage << "Called Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
|
||||
// std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
|
||||
// std::cout<<GridLogMessage << "norm ref "<< norm2(ref)<<std::endl;
|
||||
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
|
||||
std::cout<<GridLogMessage << "mflop/s per rank = "<< flops/(t1-t0)/NP<<std::endl;
|
||||
std::cout<<GridLogMessage << "mflop/s per node = "<< flops/(t1-t0)/NN<<std::endl;
|
||||
std::cout<<GridLogMessage << "RF GiB/s (base 2) = "<< 1000000. * data_rf/((t1-t0))<<std::endl;
|
||||
std::cout<<GridLogMessage << "mem GiB/s (base 2) = "<< 1000000. * data_mem/((t1-t0))<<std::endl;
|
||||
// std::cout<<GridLogMessage << "RF GiB/s (base 2) = "<< 1000000. * data_rf/((t1-t0))<<std::endl;
|
||||
// std::cout<<GridLogMessage << "mem GiB/s (base 2) = "<< 1000000. * data_mem/((t1-t0))<<std::endl;
|
||||
err = ref-result;
|
||||
std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
|
||||
//exit(0);
|
||||
|
||||
if(( norm2(err)>1.0e-4) ) {
|
||||
/*
|
||||
std::cout << "RESULT\n " << result<<std::endl;
|
||||
std::cout << "REF \n " << ref <<std::endl;
|
||||
std::cout << "ERR \n " << err <<std::endl;
|
||||
*/
|
||||
std::cout<<GridLogMessage << "WRONG RESULT" << std::endl;
|
||||
FGrid->Barrier();
|
||||
exit(-1);
|
||||
}
|
||||
assert (norm2(err)< 1.0e-4 );
|
||||
Dw.Report();
|
||||
}
|
||||
|
||||
if (1)
|
||||
@ -286,21 +339,20 @@ int main (int argc, char ** argv)
|
||||
}
|
||||
ref = -0.5*ref;
|
||||
}
|
||||
// dump=1;
|
||||
Dw.Dhop(src,result,1);
|
||||
|
||||
Dw.Dhop(src,result,DaggerYes);
|
||||
|
||||
std::cout << GridLogMessage << "----------------------------------------------------------------" << std::endl;
|
||||
std::cout << GridLogMessage << "Compare to naive wilson implementation Dag to verify correctness" << std::endl;
|
||||
std::cout << GridLogMessage << "----------------------------------------------------------------" << std::endl;
|
||||
|
||||
std::cout<<GridLogMessage << "Called DwDag"<<std::endl;
|
||||
std::cout<<GridLogMessage << "norm dag result "<< norm2(result)<<std::endl;
|
||||
std::cout<<GridLogMessage << "norm dag ref "<< norm2(ref)<<std::endl;
|
||||
err = ref-result;
|
||||
std::cout<<GridLogMessage << "norm dag diff "<< norm2(err)<<std::endl;
|
||||
if((norm2(err)>1.0e-4)){
|
||||
/*
|
||||
std::cout<< "DAG RESULT\n " <<ref << std::endl;
|
||||
std::cout<< "DAG sRESULT\n " <<result << std::endl;
|
||||
std::cout<< "DAG ERR \n " << err <<std::endl;
|
||||
*/
|
||||
}
|
||||
assert((norm2(err)<1.0e-4));
|
||||
|
||||
LatticeFermionF src_e (FrbGrid);
|
||||
LatticeFermionF src_o (FrbGrid);
|
||||
LatticeFermionF r_e (FrbGrid);
|
||||
@ -330,7 +382,6 @@ int main (int argc, char ** argv)
|
||||
if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3 WilsonKernels" <<std::endl;
|
||||
std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
|
||||
{
|
||||
Dw.ZeroCounters();
|
||||
FGrid->Barrier();
|
||||
Dw.DhopEO(src_o,r_e,DaggerNo);
|
||||
double t0=usecond();
|
||||
@ -352,7 +403,6 @@ int main (int argc, char ** argv)
|
||||
std::cout<<GridLogMessage << "Deo mflop/s = "<< flops/(t1-t0)<<std::endl;
|
||||
std::cout<<GridLogMessage << "Deo mflop/s per rank "<< flops/(t1-t0)/NP<<std::endl;
|
||||
std::cout<<GridLogMessage << "Deo mflop/s per node "<< flops/(t1-t0)/NN<<std::endl;
|
||||
Dw.Report();
|
||||
}
|
||||
Dw.DhopEO(src_o,r_e,DaggerNo);
|
||||
Dw.DhopOE(src_e,r_o,DaggerNo);
|
||||
@ -367,13 +417,7 @@ int main (int argc, char ** argv)
|
||||
|
||||
err = r_eo-result;
|
||||
std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
|
||||
if((norm2(err)>1.0e-4)){
|
||||
/*
|
||||
std::cout<< "Deo RESULT\n " <<r_eo << std::endl;
|
||||
std::cout<< "Deo REF\n " <<result << std::endl;
|
||||
std::cout<< "Deo ERR \n " << err <<std::endl;
|
||||
*/
|
||||
}
|
||||
assert(norm2(err)<1.0e-4);
|
||||
|
||||
pickCheckerboard(Even,src_e,err);
|
||||
pickCheckerboard(Odd,src_o,err);
|
||||
@ -382,6 +426,4 @@ int main (int argc, char ** argv)
|
||||
|
||||
assert(norm2(src_e)<1.0e-4);
|
||||
assert(norm2(src_o)<1.0e-4);
|
||||
Grid_finalize();
|
||||
exit(0);
|
||||
}
|
||||
|
@ -93,14 +93,14 @@ template<class Field> class FreeLaplacianStencil : public SparseMatrixBase<Field
|
||||
{
|
||||
public:
|
||||
typedef typename Field::vector_object siteObject;
|
||||
typedef CartesianStencil<siteObject, siteObject, int> StencilImpl;
|
||||
typedef CartesianStencil<siteObject, siteObject, SimpleStencilParams> StencilImpl;
|
||||
|
||||
GridBase *grid;
|
||||
StencilImpl Stencil;
|
||||
SimpleCompressor<siteObject> Compressor;
|
||||
|
||||
FreeLaplacianStencil(GridBase *_grid)
|
||||
: Stencil (_grid,6,Even,directions,displacements,0), grid(_grid)
|
||||
: Stencil (_grid,6,Even,directions,displacements,SimpleStencilParams()), grid(_grid)
|
||||
{ };
|
||||
|
||||
virtual GridBase *Grid(void) { return grid; };
|
||||
@ -168,7 +168,8 @@ public:
|
||||
typedef iImplDoubledGaugeField<Simd> SiteDoubledGaugeField;
|
||||
typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;
|
||||
|
||||
typedef CartesianStencil<siteObject, siteObject, int> StencilImpl;
|
||||
typedef CartesianStencil<siteObject, siteObject,SimpleStencilParams> StencilImpl;
|
||||
SimpleStencilParams p;
|
||||
|
||||
GridBase *grid;
|
||||
StencilImpl Stencil;
|
||||
@ -177,7 +178,7 @@ public:
|
||||
CovariantLaplacianStencil(GaugeField &Umu)
|
||||
:
|
||||
grid(Umu.Grid()),
|
||||
Stencil (grid,6,Even,directions,displacements,0),
|
||||
Stencil (grid,6,Even,directions,displacements,p),
|
||||
Uds(grid)
|
||||
{
|
||||
for (int mu = 0; mu < Nd; mu++) {
|
||||
|
@ -3,28 +3,28 @@
|
||||
#SBATCH -A LGT104
|
||||
#SBATCH -t 01:00:00
|
||||
##SBATCH -U openmpThu
|
||||
##SBATCH -p ecp
|
||||
#SBATCH -J DWF
|
||||
#SBATCH -o DWF.%J
|
||||
#SBATCH -e DWF.%J
|
||||
#SBATCH -N 1
|
||||
#SBATCH -n 1
|
||||
#SBATCH --exclusive
|
||||
#SBATCH -n 8
|
||||
#SBATCH --exclusive
|
||||
#SBATCH --gpu-bind=map_gpu:0,1,2,3,7,6,5,4
|
||||
|
||||
DIR=.
|
||||
module list
|
||||
#export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
|
||||
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
|
||||
export MPICH_GPU_SUPPORT_ENABLED=1
|
||||
export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=NONE
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
|
||||
export MPICH_SMP_SINGLE_COPY_MODE=NONE
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
|
||||
export OMP_NUM_THREADS=1
|
||||
|
||||
AT=8
|
||||
echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
|
||||
|
||||
PARAMS=" --accelerator-threads ${AT} --grid 24.24.24.24 --shm-mpi 0 --mpi 1.1.1.1"
|
||||
|
||||
srun --gpus-per-task 1 -n1 ./benchmarks/Benchmark_dwf_fp32 $PARAMS
|
||||
PARAMS=" --accelerator-threads 16 --grid 32.32.32.256 --mpi 1.1.1.8 --comms-overlap --shm 2048 --shm-mpi 0"
|
||||
echo $PARAMS
|
||||
srun --gpus-per-task 1 -n8 ./benchmarks/Benchmark_dwf_fp32 $PARAMS
|
||||
|
||||
|
||||
|
||||
|
@ -6,22 +6,43 @@
|
||||
#SBATCH -J DWF
|
||||
#SBATCH -o DWF.%J
|
||||
#SBATCH -e DWF.%J
|
||||
#SBATCH -N 1
|
||||
#SBATCH -n 8
|
||||
#SBATCH -N 8
|
||||
#SBATCH -n 64
|
||||
#SBATCH --exclusive
|
||||
#SBATCH --gpu-bind=map_gpu:0,1,2,3,7,6,5,4
|
||||
|
||||
DIR=.
|
||||
module list
|
||||
export MPICH_OFI_NIC_POLICY=GPU
|
||||
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
|
||||
export MPICH_GPU_SUPPORT_ENABLED=1
|
||||
export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=NONE
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
|
||||
export MPICH_SMP_SINGLE_COPY_MODE=NONE
|
||||
export OMP_NUM_THREADS=1
|
||||
|
||||
echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
|
||||
PARAMS=" --accelerator-threads 8 --grid 32.64.64.64 --mpi 1.2.2.2 --comms-overlap --shm 2048 --shm-mpi 0"
|
||||
|
||||
srun --gpus-per-task 1 -n8 ./mpiwrapper.sh ./benchmarks/Benchmark_dwf_fp32 $PARAMS
|
||||
PARAMS=" --accelerator-threads 16 --grid 64.64.64.256 --mpi 2.2.2.8 --comms-overlap --shm 2048 --shm-mpi 0"
|
||||
echo $PARAMS
|
||||
#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_dwf_fp32 $PARAMS > dwf.64.64.64.256.8node
|
||||
|
||||
|
||||
PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 4.4.4.1 --comms-overlap --shm 2048 --shm-mpi 1"
|
||||
echo $PARAMS
|
||||
srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_dwf_fp32 $PARAMS > dwf.64.64.64.32.8node
|
||||
|
||||
PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 4.4.4.1 --comms-overlap --shm 2048 --shm-mpi 0"
|
||||
echo $PARAMS
|
||||
#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_dwf_fp32 $PARAMS > dwf.64.64.64.32.8node.shm0
|
||||
|
||||
PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 2.2.2.8 --comms-overlap --shm 2048 --shm-mpi 1"
|
||||
echo $PARAMS
|
||||
#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_ITT $PARAMS > itt.8node
|
||||
|
||||
PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 2.2.2.8 --comms-overlap --shm 2048 --shm-mpi 0"
|
||||
echo $PARAMS
|
||||
#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_ITT $PARAMS > itt.8node_shm0
|
||||
|
||||
|
||||
|
||||
|
@ -1,10 +1,11 @@
|
||||
#!/bin/bash
|
||||
|
||||
lrank=$SLURM_LOCALID
|
||||
lgpu=(0 1 2 3 7 6 5 4)
|
||||
|
||||
export ROCR_VISIBLE_DEVICES=$SLURM_LOCALID
|
||||
export ROCR_VISIBLE_DEVICES=${lgpu[$lrank]}
|
||||
|
||||
echo "`hostname` - $lrank device=$ROCR_VISIBLE_DEVICES binding=$BINDING"
|
||||
echo "`hostname` - $lrank device=$ROCR_VISIBLE_DEVICES "
|
||||
|
||||
$*
|
||||
|
||||
|
@ -1,9 +1,14 @@
|
||||
DIR=`pwd`
|
||||
PREFIX=$DIR/../Prequisites/install/
|
||||
../../configure \
|
||||
--enable-comms=mpi \
|
||||
--enable-simd=GPU \
|
||||
--enable-shm=nvlink \
|
||||
--enable-gen-simd-width=64 \
|
||||
--enable-accelerator=cuda \
|
||||
--enable-setdevice \
|
||||
--disable-accelerator-cshift \
|
||||
--with-gmp=$PREFIX \
|
||||
--disable-fermion-reps \
|
||||
--disable-unified \
|
||||
--disable-gparity \
|
||||
|
@ -1,24 +1,27 @@
#!/bin/bash
#SBATCH -A mp13
#SBATCH -A m3886_g
#SBATCH -C gpu
#SBATCH -q regular
#SBATCH -q debug
#SBATCH -t 0:20:00
#SBATCH -n 16
#SBATCH --ntasks-per-node=4
#SBATCH -c 32
#SBATCH --exclusive
#SBATCH -N 1
#SBATCH -n 4
#SBATCH --ntasks-per-node=4
#SBATCH --gpus-per-task=1
#SBATCH --gpu-bind=map_gpu:0,1,2,3
#SBATCH --exclusive
#SBATCH --gpu-bind=none

export SLURM_CPU_BIND="cores"
export MPICH_RDMA_ENABLED_CUDA=1
export MPICH_GPU_SUPPORT_ENABLED=1
srun ./benchmarks/Benchmark_comms_host_device --mpi 2.2.2.2 --accelerator-threads 8 > comms.4node
export MPICH_RDMA_ENABLED_CUDA=1
export MPICH_GPU_IPC_ENABLED=1
export MPICH_GPU_EAGER_REGISTER_HOST_MEM=0
export MPICH_GPU_NO_ASYNC_MEMCPY=0
#export MPICH_SMP_SINGLE_COPY_MODE=CMA

OPT="--comms-overlap --comms-concurrent --shm-mpi 0"
srun ./benchmarks/Benchmark_dwf_fp32 --mpi 2.2.2.2 --grid 64.64.64.64 --accelerator-threads 8 --shm 2048 $OPT > dwf.64.64.64.64.4node.opt0
srun ./benchmarks/Benchmark_dwf_fp32 --mpi 2.2.2.2 --grid 48.48.48.48 --accelerator-threads 8 --shm 2048 $OPT > dwf.48.48.48.48.4node.opt0
OPT="--comms-overlap --shm-mpi 1"
VOL=64.64.32.32
srun ./benchmarks/Benchmark_dwf_fp32 --mpi 2.2.1.1 --grid $VOL --accelerator-threads 8 --shm 2048 $OPT
#srun ./benchmarks/Benchmark_dwf_fp32 --mpi 2.1.1.4 --grid $VOL --accelerator-threads 8 --shm 2048 $OPT
#srun ./benchmarks/Benchmark_dwf_fp32 --mpi 1.1.1.8 --grid $VOL --accelerator-threads 8 --shm 2048 $OPT

OPT="--comms-overlap --comms-concurrent --shm-mpi 1"
srun ./benchmarks/Benchmark_dwf_fp32 --mpi 2.2.2.2 --grid 64.64.64.64 --accelerator-threads 8 --shm 2048 $OPT > dwf.64.64.64.64.4node.opt1
srun ./benchmarks/Benchmark_dwf_fp32 --mpi 2.2.2.2 --grid 48.48.48.48 --accelerator-threads 8 --shm 2048 $OPT > dwf.48.48.48.48.4node.opt1
@ -1,4 +1,4 @@

export CRAY_ACCEL_TARGET=nvidia80

module load PrgEnv-gnu cpe-cuda cuda
module load PrgEnv-gnu cpe-cuda cudatoolkit/11.4
@ -6,6 +6,8 @@
--enable-simd=GPU \
--disable-fermion-reps \
--disable-gparity \
--with-gmp=$OLCF_GMP_ROOT \
--with-mpfr=/opt/cray/pe/gcc/mpfr/3.1.4/ \
CXX=hipcc MPICXX=mpicxx \
CXXFLAGS="-fPIC -I/opt/rocm-4.3.0/include/ -std=c++14 -I${MPICH_DIR}/include " \
--prefix=/ccs/home/chulwoo/Grid \
@ -1,8 +1,7 @@
#!/bin/bash
# Begin LSF Directives
#SBATCH -A LGT104
#SBATCH -t 01:00:00
##SBATCH -U openmpThu
#SBATCH -t 3:00:00
#SBATCH -p ecp
#SBATCH -J DWF
#SBATCH -o DWF.%J
@ -14,13 +13,12 @@ DIR=.
module list
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
export MPICH_GPU_SUPPORT_ENABLED=1
#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
export MPICH_SMP_SINGLE_COPY_MODE=NONE
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
export MPICH_SMP_SINGLE_COPY_MODE=CMA

export OMP_NUM_THREADS=8

AT=8
echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
PARAMS=" --accelerator-threads ${AT} --grid 32.64.64.64 --mpi 1.2.2.2 --comms-overlap --shm 2048 --shm-mpi 0"
srun -n8 --label -c$OMP_NUM_THREADS --gpus-per-task=1 ./mpiwrapper.sh ./benchmarks/Benchmark_dwf_fp32 $PARAMS
PARAMS=" --accelerator-threads ${AT} --grid 16.16.16.48 --mpi 1.2.2.2 --comms-overlap --shm 2048 --shm-mpi 0"
srun -N2 -n8 --label -c$OMP_NUM_THREADS --gpus-per-task=1 ./mpiwrapper.sh ./HMC/Mobius2p1f_DD_RHMC $PARAMS
@ -1,5 +1,9 @@
module load emacs
module load PrgEnv-gnu
module load rocm/4.3.0
module load rocm/4.5.0
module load gmp
module load cray-fftw
module load craype-accel-amd-gfx908
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
export MPICH_GPU_SUPPORT_ENABLED=1
export LD_LIBRARY_PATH=/opt/cray/pe/gcc/mpfr/3.1.4/lib/:$LD_LIBRARY_PATH
@ -1,25 +1,25 @@
tu-c0r0n00 - 0 device=0 binding=--interleave=0,1
tu-c0r0n00 - 1 device=1 binding=--interleave=2,3
tu-c0r0n09 - 1 device=1 binding=--interleave=2,3
tu-c0r0n00 - 2 device=2 binding=--interleave=4,5
tu-c0r0n06 - 0 device=0 binding=--interleave=0,1
tu-c0r0n06 - 1 device=1 binding=--interleave=2,3
tu-c0r0n09 - 0 device=0 binding=--interleave=0,1
tu-c0r0n09 - 2 device=2 binding=--interleave=4,5
tu-c0r0n03 - 1 device=1 binding=--interleave=2,3
tu-c0r0n06 - 2 device=2 binding=--interleave=4,5
tu-c0r0n09 - 3 device=3 binding=--interleave=6,7
tu-c0r0n00 - 3 device=3 binding=--interleave=6,7
tu-c0r0n03 - 0 device=0 binding=--interleave=0,1
tu-c0r0n03 - 2 device=2 binding=--interleave=4,5
tu-c0r0n06 - 3 device=3 binding=--interleave=6,7
tu-c0r0n03 - 3 device=3 binding=--interleave=6,7
tu-c0r3n00 - 0 device=0 binding=--interleave=0,1
tu-c0r3n00 - 1 device=1 binding=--interleave=2,3
tu-c0r3n00 - 2 device=2 binding=--interleave=4,5
tu-c0r3n00 - 3 device=3 binding=--interleave=6,7
tu-c0r3n06 - 1 device=1 binding=--interleave=2,3
tu-c0r3n06 - 3 device=3 binding=--interleave=6,7
tu-c0r3n06 - 0 device=0 binding=--interleave=0,1
tu-c0r3n06 - 2 device=2 binding=--interleave=4,5
tu-c0r3n03 - 1 device=1 binding=--interleave=2,3
tu-c0r3n03 - 2 device=2 binding=--interleave=4,5
tu-c0r3n03 - 0 device=0 binding=--interleave=0,1
tu-c0r3n03 - 3 device=3 binding=--interleave=6,7
tu-c0r3n09 - 0 device=0 binding=--interleave=0,1
tu-c0r3n09 - 1 device=1 binding=--interleave=2,3
tu-c0r3n09 - 2 device=2 binding=--interleave=4,5
tu-c0r3n09 - 3 device=3 binding=--interleave=6,7
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
AcceleratorCudaInit: Configure options --enable-setdevice=no
OPENMPI detected
AcceleratorCudaInit[0]: ========================
AcceleratorCudaInit[0]: Device Number : 0
@ -33,11 +33,41 @@ AcceleratorCudaInit[0]: pciBusID: 3
AcceleratorCudaInit[0]: pciDeviceID: 0
AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
AcceleratorCudaInit: Configure options --enable-setdevice=no
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-setdevice=no
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-setdevice=no
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-setdevice=no
OPENMPI detected
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-setdevice=no
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-setdevice=no
AcceleratorCudaInit[0]: ========================
AcceleratorCudaInit[0]: Device Number : 0
AcceleratorCudaInit[0]: ========================
@ -50,43 +80,25 @@ AcceleratorCudaInit[0]: pciBusID: 3
AcceleratorCudaInit[0]: pciDeviceID: 0
AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: assume user either uses
AcceleratorCudaInit: a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: Configure options --enable-setdevice=no
local rank 1 device 0 bus id: 0000:44:00.0
AcceleratorCudaInit: ================================================
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
local rank 0 device 0 bus id: 0000:03:00.0
AcceleratorCudaInit: ================================================
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
OPENMPI detected
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
local rank 0 device 0 bus id: 0000:03:00.0
AcceleratorCudaInit: ================================================
AcceleratorCudaInit: ================================================
local rank 2 device 0 bus id: 0000:84:00.0
SharedMemoryMpi: World communicator of size 16
SharedMemoryMpi: Node communicator of size 4
0SharedMemoryMpi: SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x7fcd80000000 for comms buffers
0SharedMemoryMpi: SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x153960000000 for comms buffers
Setting up IPC

__|__|__|__|__|__|__|__|__|__|__|__|__|__
@ -116,7 +128,7 @@ This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Current Grid git commit hash=9d2238148c56e3fbadfa95dcabf2b83d4bde14cd: (HEAD -> develop) uncommited changes
Current Grid git commit hash=da06d15f73184ceb15d66d4e7e702b02fed7b940: (HEAD -> feature/dirichlet, develop) uncommited changes

Grid : Message : ================================================
Grid : Message : MPI is initialised and logging filters activated
@ -124,122 +136,102 @@ Grid : Message : ================================================
Grid : Message : Requested 2147483648 byte stencil comms buffers
Grid : Message : MemoryManager Cache 34004218675 bytes
Grid : Message : MemoryManager::Init() setting up
Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 32 LARGE 8
Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 8 LARGE 2
Grid : Message : MemoryManager::Init() Non unified: Caching accelerator data in dedicated memory
Grid : Message : MemoryManager::Init() Using cudaMalloc
Grid : Message : 1.198523 s : Grid Layout
Grid : Message : 1.198530 s : Global lattice size  : 64 64 64 64
Grid : Message : 1.198534 s : OpenMP threads       : 4
Grid : Message : 1.198535 s : MPI tasks            : 2 2 2 2
Grid : Message : 1.397615 s : Making s innermost grids
Grid : Message : 1.441828 s : Initialising 4d RNG
Grid : Message : 1.547973 s : Intialising parallel RNG with unique string 'The 4D RNG'
Grid : Message : 1.547998 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
Grid : Message : 1.954777 s : Initialising 5d RNG
Grid : Message : 3.633825 s : Intialising parallel RNG with unique string 'The 5D RNG'
Grid : Message : 3.633869 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
Grid : Message : 12.162710 s : Initialised RNGs
Grid : Message : 15.882520 s : Drawing gauge field
Grid : Message : 15.816362 s : Random gauge initialised
Grid : Message : 17.279671 s : Setting up Cshift based reference
Grid : Message : 26.331426 s : *****************************************************************
Grid : Message : 26.331452 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
Grid : Message : 26.331454 s : *****************************************************************
Grid : Message : 26.331456 s : *****************************************************************
Grid : Message : 26.331458 s : * Benchmarking DomainWallFermionR::Dhop
Grid : Message : 26.331459 s : * Vectorising space-time by 8
Grid : Message : 26.331463 s : * VComplexF size is 64 B
Grid : Message : 26.331465 s : * SINGLE precision
Grid : Message : 26.331467 s : * Using Overlapped Comms/Compute
Grid : Message : 26.331468 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 26.331469 s : *****************************************************************
Grid : Message : 28.413717 s : Called warmup
Grid : Message : 56.418423 s : Called Dw 3000 times in 2.80047e+07 us
Grid : Message : 56.418476 s : mflop/s = 3.79581e+07
Grid : Message : 56.418479 s : mflop/s per rank = 2.37238e+06
Grid : Message : 56.418481 s : mflop/s per node = 9.48953e+06
Grid : Message : 56.418483 s : RF GiB/s (base 2) = 77130
Grid : Message : 56.418485 s : mem GiB/s (base 2) = 48206.3
Grid : Message : 56.422076 s : norm diff 1.03481e-13
Grid : Message : 56.456894 s : #### Dhop calls report
Grid : Message : 56.456899 s : WilsonFermion5D Number of DhopEO Calls : 6002
Grid : Message : 56.456903 s : WilsonFermion5D TotalTime /Calls : 4710.93 us
Grid : Message : 56.456905 s : WilsonFermion5D CommTime /Calls : 3196.15 us
Grid : Message : 56.456908 s : WilsonFermion5D FaceTime /Calls : 494.392 us
Grid : Message : 56.456910 s : WilsonFermion5D ComputeTime1/Calls : 44.4107 us
Grid : Message : 56.456912 s : WilsonFermion5D ComputeTime2/Calls : 1037.75 us
Grid : Message : 56.456921 s : Average mflops/s per call : 3.55691e+09
Grid : Message : 56.456925 s : Average mflops/s per call per rank : 2.22307e+08
Grid : Message : 56.456928 s : Average mflops/s per call per node : 8.89228e+08
Grid : Message : 56.456930 s : Average mflops/s per call (full) : 3.82915e+07
Grid : Message : 56.456933 s : Average mflops/s per call per rank (full): 2.39322e+06
Grid : Message : 56.456952 s : Average mflops/s per call per node (full): 9.57287e+06
Grid : Message : 56.456954 s : WilsonFermion5D Stencil
Grid : Message : 56.457016 s : Stencil calls 3001
Grid : Message : 56.457022 s : Stencil halogtime 0
Grid : Message : 56.457024 s : Stencil gathertime 55.9154
Grid : Message : 56.457026 s : Stencil gathermtime 20.1073
Grid : Message : 56.457028 s : Stencil mergetime 18.5585
Grid : Message : 56.457030 s : Stencil decompresstime 0.0639787
Grid : Message : 56.457032 s : Stencil comms_bytes 4.02653e+08
Grid : Message : 56.457034 s : Stencil commtime 6379.93
Grid : Message : 56.457036 s : Stencil 63.1124 GB/s per rank
Grid : Message : 56.457038 s : Stencil 252.45 GB/s per node
Grid : Message : 56.457040 s : WilsonFermion5D StencilEven
Grid : Message : 56.457048 s : WilsonFermion5D StencilOdd
Grid : Message : 56.457062 s : WilsonFermion5D Stencil     Reporti()
Grid : Message : 56.457065 s : WilsonFermion5D StencilEven Reporti()
Grid : Message : 56.457066 s : WilsonFermion5D StencilOdd Reporti()
Grid : Message : 79.259261 s : Compare to naive wilson implementation Dag to verify correctness
Grid : Message : 79.259287 s : Called DwDag
Grid : Message : 79.259288 s : norm dag result 12.0421
Grid : Message : 79.271740 s : norm dag ref 12.0421
Grid : Message : 79.287759 s : norm dag diff 7.63236e-14
Grid : Message : 79.328100 s : Calling Deo and Doe and //assert Deo+Doe == Dunprec
Grid : Message : 79.955951 s : src_e0.499997
Grid : Message : 80.633620 s : src_o0.500003
Grid : Message : 80.164163 s : *********************************************************
Grid : Message : 80.164168 s : * Benchmarking DomainWallFermionF::DhopEO
Grid : Message : 80.164170 s : * Vectorising space-time by 8
Grid : Message : 80.164172 s : * SINGLE precision
Grid : Message : 80.164174 s : * Using Overlapped Comms/Compute
Grid : Message : 80.164177 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 80.164178 s : *********************************************************
Grid : Message : 93.797635 s : Deo mflop/s = 3.93231e+07
Grid : Message : 93.797670 s : Deo mflop/s per rank 2.45769e+06
Grid : Message : 93.797672 s : Deo mflop/s per node 9.83077e+06
Grid : Message : 93.797674 s : #### Dhop calls report
Grid : Message : 93.797675 s : WilsonFermion5D Number of DhopEO Calls : 3001
Grid : Message : 93.797677 s : WilsonFermion5D TotalTime /Calls : 4542.83 us
Grid : Message : 93.797679 s : WilsonFermion5D CommTime /Calls : 2978.97 us
Grid : Message : 93.797681 s : WilsonFermion5D FaceTime /Calls : 602.287 us
Grid : Message : 93.797683 s : WilsonFermion5D ComputeTime1/Calls : 67.1416 us
Grid : Message : 93.797685 s : WilsonFermion5D ComputeTime2/Calls : 1004.07 us
Grid : Message : 93.797713 s : Average mflops/s per call : 3.30731e+09
Grid : Message : 93.797717 s : Average mflops/s per call per rank : 2.06707e+08
Grid : Message : 93.797719 s : Average mflops/s per call per node : 8.26827e+08
Grid : Message : 93.797721 s : Average mflops/s per call (full) : 3.97084e+07
Grid : Message : 93.797727 s : Average mflops/s per call per rank (full): 2.48178e+06
Grid : Message : 93.797732 s : Average mflops/s per call per node (full): 9.92711e+06
Grid : Message : 93.797735 s : WilsonFermion5D Stencil
Grid : Message : 93.797746 s : WilsonFermion5D StencilEven
Grid : Message : 93.797758 s : WilsonFermion5D StencilOdd
Grid : Message : 93.797769 s : Stencil calls 3001
Grid : Message : 93.797773 s : Stencil halogtime 0
Grid : Message : 93.797776 s : Stencil gathertime 56.7458
Grid : Message : 93.797780 s : Stencil gathermtime 22.6504
Grid : Message : 93.797782 s : Stencil mergetime 21.1913
Grid : Message : 93.797786 s : Stencil decompresstime 0.0556481
Grid : Message : 93.797788 s : Stencil comms_bytes 2.01327e+08
Grid : Message : 93.797791 s : Stencil commtime 2989.33
Grid : Message : 93.797795 s : Stencil 67.3484 GB/s per rank
Grid : Message : 93.797798 s : Stencil 269.394 GB/s per node
Grid : Message : 93.797801 s : WilsonFermion5D Stencil     Reporti()
Grid : Message : 93.797803 s : WilsonFermion5D StencilEven Reporti()
Grid : Message : 93.797805 s : WilsonFermion5D StencilOdd Reporti()
Grid : Message : 93.873429 s : r_e6.02111
Grid : Message : 93.879931 s : r_o6.02102
Grid : Message : 93.885912 s : res12.0421
Grid : Message : 94.876555 s : norm diff 0
Grid : Message : 95.485643 s : norm diff even 0
Grid : Message : 95.581236 s : norm diff odd 0
Grid : Message : 1.875883 s : Grid Layout
Grid : Message : 1.875893 s : Global lattice size  : 64 64 64 64
Grid : Message : 1.875897 s : OpenMP threads       : 4
Grid : Message : 1.875898 s : MPI tasks            : 2 2 2 2
Grid : Message : 1.993571 s : Initialising 4d RNG
Grid : Message : 2.881990 s : Intialising parallel RNG with unique string 'The 4D RNG'
Grid : Message : 2.882370 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
Grid : Message : 2.495044 s : Initialising 5d RNG
Grid : Message : 4.120900 s : Intialising parallel RNG with unique string 'The 5D RNG'
Grid : Message : 4.121350 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
Grid : Message : 15.268010 s : Drawing gauge field
Grid : Message : 16.234025 s : Random gauge initialised
Grid : Message : 16.234057 s : Applying BCs
Grid : Message : 16.365565 s : Setting up Cshift based reference
Grid : Message : 44.512418 s : *****************************************************************
Grid : Message : 44.512448 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
Grid : Message : 44.512450 s : *****************************************************************
Grid : Message : 44.512451 s : *****************************************************************
Grid : Message : 44.512452 s : * Benchmarking DomainWallFermionR::Dhop
Grid : Message : 44.512453 s : * Vectorising space-time by 8
Grid : Message : 44.512454 s : * VComplexF size is 64 B
Grid : Message : 44.512456 s : * SINGLE precision
Grid : Message : 44.512459 s : * Using Overlapped Comms/Compute
Grid : Message : 44.512460 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 44.512461 s : *****************************************************************
Grid : Message : 46.389070 s : Called warmup
Grid : Message : 49.211265 s : Called Dw 300 times in 2.82203e+06 us
Grid : Message : 49.211295 s : mflop/s = 3.76681e+07
Grid : Message : 49.211297 s : mflop/s per rank = 2.35425e+06
Grid : Message : 49.211299 s : mflop/s per node = 9.41702e+06
Grid : Message : 49.211301 s : RF GiB/s (base 2) = 76540.6
Grid : Message : 49.211308 s : mem GiB/s (base 2) = 47837.9
Grid : Message : 49.214868 s : norm diff 1.06409e-13
Grid : Message : 92.647781 s : Compare to naive wilson implementation Dag to verify correctness
Grid : Message : 92.647816 s : Called DwDag
Grid : Message : 92.647817 s : norm dag result 12.0421
Grid : Message : 92.801806 s : norm dag ref 12.0421
Grid : Message : 92.817724 s : norm dag diff 7.21921e-14
Grid : Message : 92.858973 s : Calling Deo and Doe and //assert Deo+Doe == Dunprec
Grid : Message : 93.210378 s : src_e0.499997
Grid : Message : 93.583286 s : src_o0.500003
Grid : Message : 93.682468 s : *********************************************************
Grid : Message : 93.682471 s : * Benchmarking DomainWallFermionF::DhopEO
Grid : Message : 93.682472 s : * Vectorising space-time by 8
Grid : Message : 93.682473 s : * SINGLE precision
Grid : Message : 93.682475 s : * Using Overlapped Comms/Compute
Grid : Message : 93.682476 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 93.682477 s : *********************************************************
Grid : Message : 95.162342 s : Deo mflop/s = 3.92487e+07
Grid : Message : 95.162387 s : Deo mflop/s per rank 2.45305e+06
Grid : Message : 95.162389 s : Deo mflop/s per node 9.81219e+06
Grid : Message : 95.232801 s : r_e6.02111
Grid : Message : 95.240061 s : r_o6.02102
Grid : Message : 95.245975 s : res12.0421
Grid : Message : 95.833402 s : norm diff 0
Grid : Message : 96.573829 s : norm diff even 0
Grid : Message : 96.868272 s : norm diff odd 0
Dirichlet block [0 64 64 32 32]
Grid : Message : 97.756909 s : Grid Layout
Grid : Message : 97.756911 s : Global lattice size  : 64 64 64 64
Grid : Message : 97.756921 s : OpenMP threads       : 4
Grid : Message : 97.756922 s : MPI tasks            : 2 2 2 2
Grid : Message : 97.897085 s : Initialising 4d RNG
Grid : Message : 97.965061 s : Intialising parallel RNG with unique string 'The 4D RNG'
Grid : Message : 97.965097 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
Grid : Message : 98.367431 s : Initialising 5d RNG
Grid : Message : 99.752745 s : Intialising parallel RNG with unique string 'The 5D RNG'
Grid : Message : 99.752790 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
Grid : Message : 111.290148 s : Drawing gauge field
Grid : Message : 112.349289 s : Random gauge initialised
Grid : Message : 112.349320 s : Applying BCs
Grid : Message : 113.948740 s : Setting up Cshift based reference
Grid : Message : 140.320415 s : *****************************************************************
Grid : Message : 140.320443 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
Grid : Message : 140.320444 s : *****************************************************************
Grid : Message : 140.320445 s : *****************************************************************
Grid : Message : 140.320446 s : * Benchmarking DomainWallFermionR::Dhop
Grid : Message : 140.320447 s : * Vectorising space-time by 8
Grid : Message : 140.320448 s : * VComplexF size is 64 B
Grid : Message : 140.320450 s : * SINGLE precision
Grid : Message : 140.320451 s : * Using Overlapped Comms/Compute
Grid : Message : 140.320452 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 140.320453 s : *****************************************************************
Grid : Message : 142.296150 s : Called warmup
Grid : Message : 144.397678 s : Called Dw 300 times in 2.36719e+06 us
Grid : Message : 144.397700 s : mflop/s = 4.49058e+07
Grid : Message : 144.397702 s : mflop/s per rank = 2.80661e+06
Grid : Message : 144.397704 s : mflop/s per node = 1.12265e+07
Grid : Message : 144.397706 s : RF GiB/s (base 2) = 91247.6
Grid : Message : 144.397708 s : mem GiB/s (base 2) = 57029.7
Grid : Message : 144.401269 s : norm diff 9.78944e-14
Grid : Message : 186.885460 s : Compare to naive wilson implementation Dag to verify correctness
Grid : Message : 186.885492 s : Called DwDag
Grid : Message : 186.885493 s : norm dag result 10.4157
Grid : Message : 186.897154 s : norm dag ref 11.2266
Grid : Message : 186.912538 s : norm dag diff 0.484633
@ -1,14 +1,13 @@
#!/bin/bash
#SBATCH -J dslash
#SBATCH -A tc002
#SBATCH -t 2:20:00
#SBATCH --nodelist=tu-c0r0n[00,03,06,09]
#SBATCH -A dp207
#SBATCH --exclusive
#SBATCH --nodes=4
#SBATCH --ntasks=16
#SBATCH --qos=standard
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=8
#SBATCH --time=12:00:00
#SBATCH --time=0:05:00
#SBATCH --partition=gpu
#SBATCH --gres=gpu:4
#SBATCH --output=%x.%j.out
@ -31,7 +31,6 @@ Author: paboyle <paboyle@ph.ed.ac.uk>

using namespace std;
using namespace Grid;
;

int main(int argc, char ** argv) {
Grid_init(&argc, &argv);
@ -80,7 +79,8 @@ int main(int argc, char ** argv) {
Foo=lex;
}

typedef CartesianStencil<vobj,vobj,int> Stencil;
typedef CartesianStencil<vobj,vobj,SimpleStencilParams> Stencil;
SimpleStencilParams p;
for(int dir=0;dir<4;dir++){
for(int disp=0;disp<Fine._fdimensions[dir];disp++){

@ -90,7 +90,7 @@ int main(int argc, char ** argv) {
std::vector<int> directions(npoint,dir);
std::vector<int> displacements(npoint,disp);

Stencil myStencil(&Fine,npoint,0,directions,displacements,0);
Stencil myStencil(&Fine,npoint,0,directions,displacements,p);
Coordinate ocoor(4);
for(int o=0;o<Fine.oSites();o++){
Fine.oCoorFromOindex(ocoor,o);
@ -183,8 +183,8 @@ int main(int argc, char ** argv) {
std::vector<int> directions(npoint,dir);
std::vector<int> displacements(npoint,disp);

Stencil EStencil(&rbFine,npoint,Even,directions,displacements,0);
Stencil OStencil(&rbFine,npoint,Odd,directions,displacements,0);
Stencil EStencil(&rbFine,npoint,Even,directions,displacements,p);
Stencil OStencil(&rbFine,npoint,Odd,directions,displacements,p);

Coordinate ocoor(4);
for(int o=0;o<Fine.oSites();o++){