Mirror of https://github.com/paboyle/Grid.git

Commit f45d2d5dcc (parent 0a82fae45c):
perambLight done, but SliceShare and Write do not work yet
@@ -32,6 +32,8 @@ See the full license in the file "LICENSE" in the top level distribution directory
 #include <Hadrons/Global.hpp>
 #include <Hadrons/Environment.hpp>
 #include <Hadrons/Solver.hpp>
+#include "Grid/lattice/Lattice_peekpoke.h"
+#include <Grid/Eigen/unsupported/CXX11/Tensor>
 
 BEGIN_HADRONS_NAMESPACE
 
@@ -117,7 +119,65 @@ public:
         return operator()(i, MyIndex);
     }
 };
+/*
+#define BEGIN_GRID_NAMESPACE namespace Grid {
+BEGIN_GRID_NAMESPACE
+
+void CartesianCommunicatorCandidate::SliceShare( GridBase * gridLowDim, GridBase * gridHighDim, void * Buffer, int BufferSize )
+{
+  // Work out which dimension is the spread-out dimension
+  assert(gridLowDim);
+  assert(gridHighDim);
+  const int iNumDims{(const int)gridHighDim->_gdimensions.size()};
+  assert(iNumDims == gridLowDim->_gdimensions.size());
+  int dimSpreadOut = -1;
+  std::vector<int> coor(iNumDims);
+  for( int i = 0 ; i < iNumDims ; i++ ) {
+    coor[i] = gridHighDim->_processor_coor[i];
+    if( gridLowDim->_gdimensions[i] != gridHighDim->_gdimensions[i] ) {
+      assert( dimSpreadOut == -1 );
+      assert( gridLowDim->_processors[i] == 1 ); // easiest assumption to make for now
+      dimSpreadOut = i;
+    }
+  }
+  if( dimSpreadOut != -1 && gridHighDim->_processors[dimSpreadOut] != gridLowDim->_processors[dimSpreadOut] ) {
+    // Make sure the same number of data elements exist on each slice
+    const int NumSlices{gridHighDim->_processors[dimSpreadOut] / gridLowDim->_processors[dimSpreadOut]};
+    assert(gridHighDim->_processors[dimSpreadOut] == gridLowDim->_processors[dimSpreadOut] * NumSlices);
+    const int SliceSize{BufferSize/NumSlices};
+    CCC_DEBUG_DUMP(Buffer, NumSlices, SliceSize);
+    assert(BufferSize == SliceSize * NumSlices);
+#ifndef USE_LOCAL_SLICES
+    assert(0); // Can't do this without MPI (should really test whether MPI is defined)
+#else
+    const auto MyRank{gridHighDim->ThisRank()};
+    std::vector<CommsRequest_t> reqs(0);
+    int MySlice{coor[dimSpreadOut]};
+    char * const _buffer{(char *)Buffer};
+    char * const MyData{_buffer + MySlice * SliceSize};
+    for(int i = 1; i < NumSlices ; i++ ){
+      int SendSlice = ( MySlice + i ) % NumSlices;
+      int RecvSlice = ( MySlice - i + NumSlices ) % NumSlices;
+      char * const RecvData{_buffer + RecvSlice * SliceSize};
+      coor[dimSpreadOut] = SendSlice;
+      const auto SendRank{gridHighDim->RankFromProcessorCoor(coor)};
+      coor[dimSpreadOut] = RecvSlice;
+      const auto RecvRank{gridHighDim->RankFromProcessorCoor(coor)};
+      std::cout << GridLogMessage << "Send slice " << MySlice << " (" << MyRank << ") to " << SendSlice << " (" << SendRank
+                << "), receive slice from " << RecvSlice << " (" << RecvRank << ")" << std::endl;
+      gridHighDim->SendToRecvFromBegin(reqs,MyData,SendRank,RecvData,RecvRank,SliceSize);
+      //memcpy(RecvData,MyData,SliceSize); // Debug
+    }
+    gridHighDim->SendToRecvFromComplete(reqs);
+    std::cout << GridLogMessage << "Slice data shared." << std::endl;
+    CCC_DEBUG_DUMP(Buffer, NumSlices, SliceSize);
+#endif
+  }
+}
+
+#define END_GRID_NAMESPACE }
+
+*/
 END_HADRONS_NAMESPACE
 
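Note on the commented-out SliceShare above: each rank along the spread-out dimension holds one slice of the buffer and, over NumSlices-1 steps, sends its own slice i places forward while receiving the slice from i places back, so every rank ends up with the full buffer. Below is a stand-alone sketch of that ring exchange, not part of the commit: plain memcpy between per-rank buffers stands in for SendToRecvFromBegin/Complete, and the slice count and size are made-up illustration values.

// Stand-alone sketch of the ring exchange used by SliceShare above (illustration only).
#include <cassert>
#include <cstring>
#include <iostream>
#include <vector>

int main()
{
    const int NumSlices  = 4;                    // hypothetical high/low processor ratio
    const int SliceSize  = 8;                    // hypothetical bytes per slice
    const int BufferSize = NumSlices * SliceSize;

    // One buffer per "rank"; initially each rank only knows its own slice.
    std::vector<std::vector<char>> buffer(NumSlices, std::vector<char>(BufferSize, 0));
    for (int rank = 0; rank < NumSlices; rank++)
        std::memset(&buffer[rank][rank * SliceSize], 'A' + rank, SliceSize);

    // Same index arithmetic as the loop in SliceShare: on step i every rank sends its
    // own slice to the rank i places ahead and receives the slice of the rank i places
    // behind, so after NumSlices-1 steps every slot of every buffer is filled.
    for (int i = 1; i < NumSlices; i++) {
        for (int MySlice = 0; MySlice < NumSlices; MySlice++) {
            int SendSlice = ( MySlice + i ) % NumSlices;             // who receives my slice
            int RecvSlice = ( MySlice - i + NumSlices ) % NumSlices; // whose slice I receive
            // Receive side of the exchange: my buffer gains RecvSlice's own data.
            std::memcpy(&buffer[MySlice][RecvSlice * SliceSize],
                        &buffer[RecvSlice][RecvSlice * SliceSize], SliceSize);
            (void)SendSlice; // the matching send is performed by rank SendSlice's receive
        }
    }

    // Every rank now holds every slice.
    for (int rank = 0; rank < NumSlices; rank++)
        for (int s = 0; s < NumSlices; s++)
            assert(buffer[rank][s * SliceSize] == 'A' + s);
    std::cout << "all " << NumSlices << " slices present on every rank" << std::endl;
    return 0;
}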
@@ -107,7 +107,6 @@ void TDistilVectors<FImpl>::setup(void)
     envTmp(LatticeSpinColourVector, "tmp3d",1,LatticeSpinColourVector(grid3d));
     envTmp(LatticeColourVector, "tmp3d_nospin",1,LatticeColourVector(grid3d));
     envTmp(LatticeSpinColourVector, "sink_tslice",1,LatticeSpinColourVector(grid3d));
-    envTmp(LatticeSpinColourVector, "sink4d",1,LatticeSpinColourVector(grid4d));
     envTmp(LatticeColourVector, "evec3d",1,LatticeColourVector(grid3d));
 }
 
@@ -117,8 +116,7 @@ void TDistilVectors<FImpl>::execute(void)
 {
 
     auto &noise = envGet(std::vector<std::vector<std::vector<SpinVector>>>, par().noise);
-    //auto &perambulator = envGet(std::vector<SpinVector>, par().perambulator);
-    auto &perambulator = envGet(Perambulator<SpinVector>, par().noise);
+    auto &perambulator = envGet(Perambulator<SpinVector>, getName() + "_perambulator_light");
     auto &epack = envGet(Grid::Hadrons::EigenPack<LatticeColourVector>, par().eigenPack);
     auto &rho = envGet(std::vector<FermionField>, getName() + "_rho");
     auto &phi = envGet(std::vector<FermionField>, getName() + "_phi");
@@ -129,7 +127,6 @@ void TDistilVectors<FImpl>::execute(void)
     envGetTmp(LatticeSpinColourVector, tmp3d);
     envGetTmp(LatticeColourVector, tmp3d_nospin);
     envGetTmp(LatticeSpinColourVector, sink_tslice);
-    envGetTmp(LatticeSpinColourVector, sink4d);
     envGetTmp(LatticeColourVector, evec3d);
 
     GridCartesian * grid4d = env().getGrid();
@@ -88,13 +88,103 @@ void TPerambLight<FImpl>::setup(void)
     envCreate(Perambulator<SpinVector>, getName() + "_perambulator_light", 1,
               noise.size() *nvec*Nt);
 
+    GridCartesian * grid4d = env().getGrid();
+    std::vector<int> latt_size = GridDefaultLatt();
+    std::vector<int> simd_layout = GridDefaultSimd(Nd, vComplex::Nsimd());
+    std::vector<int> mpi_layout = GridDefaultMpi();
+    std::vector<int> simd_layout_3 = GridDefaultSimd(Nd-1, vComplex::Nsimd());
+    latt_size[Nd-1] = 1;
+    simd_layout_3.push_back( 1 );
+    mpi_layout[Nd-1] = 1;
+    GridCartesian * grid3d = new GridCartesian(latt_size,simd_layout_3,mpi_layout,*grid4d);
+
+    envTmp(LatticeSpinColourVector, "dist_source",1,LatticeSpinColourVector(grid4d));
+    envTmp(LatticeSpinColourVector, "tmp2",1,LatticeSpinColourVector(grid4d));
+    envTmp(LatticeSpinColourVector, "result",1,LatticeSpinColourVector(grid4d));
+    envTmp(LatticeSpinColourVector, "result_single_component",1,LatticeSpinColourVector(grid4d));
+    envTmp(LatticeColourVector, "result_nospin",1,LatticeColourVector(grid4d));
+    envTmp(LatticeColourVector, "tmp_nospin",1,LatticeColourVector(grid4d));
+    envTmp(LatticeSpinColourVector, "tmp3d",1,LatticeSpinColourVector(grid3d));
+    envTmp(LatticeColourVector, "tmp3d_nospin",1,LatticeColourVector(grid3d));
+    envTmp(LatticeColourVector, "result_3d",1,LatticeColourVector(grid3d));
+    envTmp(LatticeColourVector, "evec3d",1,LatticeColourVector(grid3d));
+    envTmp(LatticeSpinVector, "peramb_tmp",1,LatticeSpinVector(grid4d));
+
 }
 
 // execution ///////////////////////////////////////////////////////////////////
 template <typename FImpl>
 void TPerambLight<FImpl>::execute(void)
 {
-    /*
+    auto &noise = envGet(std::vector<std::vector<std::vector<SpinVector>>>, par().noise);
+    auto &perambulator = envGet(Perambulator<SpinVector>, getName() + "_perambulator_light");
+    auto &epack = envGet(Grid::Hadrons::EigenPack<LatticeColourVector>, par().eigenPack);
+
+    GridCartesian * grid4d = env().getGrid();
+    std::vector<int> latt_size = GridDefaultLatt();
+    std::vector<int> simd_layout = GridDefaultSimd(Nd, vComplex::Nsimd());
+    std::vector<int> mpi_layout = GridDefaultMpi();
+    std::vector<int> simd_layout_3 = GridDefaultSimd(Nd-1, vComplex::Nsimd());
+    latt_size[Nd-1] = 1;
+    simd_layout_3.push_back( 1 );
+    mpi_layout[Nd-1] = 1;
+    GridCartesian * grid3d = new GridCartesian(latt_size,simd_layout_3,mpi_layout,*grid4d);
+
+    LatticeGaugeField Umu(grid4d);
+    FieldMetaData header;
+    std::string fileName( "/home/dp008/dp008/dc-rich6/Scripts/ConfigsDeflQED/ckpoint_lat.3000" );
+    std::cout << GridLogMessage << "Loading NERSC configuration from '" << fileName << "'" << std::endl;
+    NerscIO::readConfiguration(Umu, header, fileName);
+    std::cout << GridLogMessage << "reading done." << std::endl;
+
+    envGetTmp(LatticeSpinColourVector, dist_source);
+    envGetTmp(LatticeSpinColourVector, tmp2);
+    envGetTmp(LatticeSpinColourVector, result);
+    envGetTmp(LatticeSpinColourVector, result_single_component);
+    envGetTmp(LatticeColourVector, result_nospin);
+    envGetTmp(LatticeColourVector, tmp_nospin);
+    envGetTmp(LatticeSpinColourVector, tmp3d);
+    envGetTmp(LatticeColourVector, tmp3d_nospin);
+    envGetTmp(LatticeColourVector, result_3d);
+    envGetTmp(LatticeColourVector, evec3d);
+    envGetTmp(LatticeSpinVector, peramb_tmp);
+
+    int Ntlocal = grid4d->LocalDimensions()[3];
+    int Ntfirst = grid4d->LocalStarts()[3];
+
+    int tsrc=0;
+    int nnoise=1;
+    int LI=6;
+    int Ns=4;
+    int Nt_inv=1;
+    int Nt=64;
+    int TI=64;
+    int nvec=6;
+    bool full_tdil=true;
+
+    Real mass=0.005; // TODO Infile
+    Real M5  =1.8;   // TODO Infile
+    std::cout << "init RBG " << std::endl;
+    GridRedBlackCartesian RBGrid(grid4d);
+    std::cout << "init RBG done" << std::endl;
+
+    int Ls=16;
+
+    double CGPrecision = 10e-8;
+    int MaxIterations = 10000;
+
+    GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,grid4d);
+    GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,grid4d);
+
+    typedef DomainWallFermionR FermionAction;
+
+    FermionAction Dop(Umu,*FGrid,*FrbGrid,*grid4d,RBGrid,mass,M5);
+
+    MdagMLinearOperator<FermionAction,LatticeFermion> HermOp(Dop);
+    ConjugateGradient<LatticeFermion> CG(CGPrecision,MaxIterations);
+    SchurRedBlackDiagMooeeSolve<LatticeFermion> SchurSolver(CG);
+
     for (int inoise = 0; inoise < nnoise; inoise++) {
       for (int dk = 0; dk < LI; dk++) {
         for (int dt = 0; dt < Nt_inv; dt++) {
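The setup and execute code above derive a separate three-dimensional grid for single time slices from the default four-dimensional grid: the global time extent, the SIMD layout and the MPI decomposition in the time direction are all forced to 1, and the 4d grid is passed as parent so both share one processor layout. Below is a minimal stand-alone sketch of just that construction, not part of the commit, using the same calls the diff already uses (std::vector<int> layouts, the parent-grid GridCartesian constructor) and assuming Grid's usual Grid_init/Grid_finalize boilerplate.

// Stand-alone sketch: deriving a per-timeslice 3d grid from the default 4d grid,
// mirroring the layout manipulation in TPerambLight<FImpl>::setup/execute above.
#include <Grid/Grid.h>

using namespace Grid;
using namespace Grid::QCD;

int main(int argc, char **argv)
{
    Grid_init(&argc, &argv);

    // 4d grid: global lattice, SIMD layout and MPI decomposition from the command line
    std::vector<int> latt_size   = GridDefaultLatt();
    std::vector<int> simd_layout = GridDefaultSimd(Nd, vComplex::Nsimd());
    std::vector<int> mpi_layout  = GridDefaultMpi();
    GridCartesian grid4d(latt_size, simd_layout, mpi_layout);

    // 3d grid: one site in time, no vectorisation or MPI decomposition over time,
    // and the 4d grid passed as parent so the processor geometry is shared.
    std::vector<int> latt_size_3   = latt_size;
    std::vector<int> simd_layout_3 = GridDefaultSimd(Nd - 1, vComplex::Nsimd());
    std::vector<int> mpi_layout_3  = mpi_layout;
    latt_size_3[Nd - 1]   = 1;
    simd_layout_3.push_back(1);
    mpi_layout_3[Nd - 1]  = 1;
    GridCartesian grid3d(latt_size_3, simd_layout_3, mpi_layout_3, grid4d);

    Grid_finalize();
    return 0;
}

In the module itself the 3d grid only carries time-slice work (ExtractSliceLocal/InsertSliceLocal of eigenvectors and solutions), while the 4d grid feeds the domain-wall solver through the 5d grids built by SpaceTimeGrid::makeFiveDimGrid and makeFiveDimRedBlackGrid.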
@@ -109,16 +199,12 @@ void TPerambLight<FImpl>::execute(void)
           for (int ik = dk; ik < nvec; ik += LI){
             for (int is = ds; is < Ns; is += Ns){ //at the moment, full spin dilution is enforced
               std::cout << "LapH source vector from noise " << it << " and dilution component (d_k,d_t,d_alpha) : (" << ik << ","<< is << ")" << std::endl;
-              ExtractSliceLocal(evec3d,eig4d.evec[ik],0,it,3);
-              tmp3d_nospin = evec3d * noises[inoise][it][ik]()(is)(); //noises do not have to be a spin vector
+              ExtractSliceLocal(evec3d,epack.evec[ik],0,it,3);
+              tmp3d_nospin = evec3d * noise[inoise][it][ik]()(is)(); //noises do not have to be a spin vector
               tmp3d=zero;
               pokeSpin(tmp3d,tmp3d_nospin,is);
               tmp2=zero;
-#ifdef USE_LOCAL_SLICES
               InsertSliceLocal(tmp3d,tmp2,0,it-Ntfirst,Grid::QCD::Tdir);
-#else
-              InsertSlice(tmp3d,tmp2,it,Grid::QCD::Tdir);
-#endif
               dist_source += tmp2;
             }
           }
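The source-construction loop above interlaces the LapH eigenvectors over LI dilution components (ik runs over dk, dk+LI, dk+2*LI, ...) and enforces full spin dilution (exactly one spin per component). Below is a stand-alone sketch of that bookkeeping, not part of the commit, with nvec, LI and Ns taken from the values hard-coded in the diff; with LI equal to nvec each component contains exactly one eigenvector.

// Stand-alone sketch: which (eigenvector, spin) pairs belong to each dilution component.
#include <iostream>

int main()
{
    const int nvec = 6; // number of LapH eigenvectors (hard-coded in the diff)
    const int LI   = 6; // number of eigenvector dilution components
    const int Ns   = 4; // number of spins, fully diluted

    for (int dk = 0; dk < LI; dk++) {
        for (int ds = 0; ds < Ns; ds++) {
            std::cout << "dilution component (d_k,d_alpha)=(" << dk << "," << ds << ") uses";
            // Same loop structure as in TPerambLight<FImpl>::execute above
            for (int ik = dk; ik < nvec; ik += LI)
                for (int is = ds; is < Ns; is += Ns)
                    std::cout << " (ik=" << ik << ",is=" << is << ")";
            std::cout << std::endl;
        }
    }
    return 0;
}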
@@ -131,31 +217,29 @@ void TPerambLight<FImpl>::execute(void)
           Dop.ImportPhysicalFermionSource(dist_source,src5);
           SchurSolver(Dop,src5,sol5);
           Dop.ExportPhysicalFermionSolution(sol5,result); //These are the meson sinks
-          if (compute_current_sink)
-            current_sink[inoise+nnoise*(dk+LI*(dt+Nt_inv*ds))] = result;
+          //if (compute_current_sink)
+          // current_sink[inoise+nnoise*(dk+LI*(dt+Nt_inv*ds))] = result;
           std::cout << "Contraction of perambulator from noise " << inoise << " and dilution component (d_k,d_t,d_alpha) : (" << dk << ","<< dt << "," << ds << ")" << std::endl;
           for (int is = 0; is < Ns; is++) {
             result_nospin = peekSpin(result,is);
             for (int t = Ntfirst; t < Ntfirst + Ntlocal; t++) {
-#ifdef USE_LOCAL_SLICES
               ExtractSliceLocal(result_3d,result_nospin,0,t-Ntfirst,Grid::QCD::Tdir);
-#else
-              ExtractSlice(result_3d,result_nospin,t,3);
-#endif
               for (int ivec = 0; ivec < nvec; ivec++) {
-                ExtractSliceLocal(evec3d,eig4d.evec[ivec],0,t,3);
+                ExtractSliceLocal(evec3d,epack.evec[ivec],0,t,3);
                 pokeSpin(perambulator(t, ivec, dk, inoise,dt,ds),innerProduct(evec3d, result_3d),is);
               }
             }
           }
         }
       }
+    }
+  }
     std::cout << "perambulator done" << std::endl;
-    perambulator.SliceShare( grid3d, grid4d );
+    //perambulator.SliceShare( grid3d, grid4d );
 
     // THIS IS WHERE WE WANT TO SAVE THE PERAMBULATORS TO DISK
-    perambulator.WriteTemporary(std::string(pszPerambPack));
-    */
+    //perambulator.WriteTemporary(std::string("perambulators/file"));
 }
 
 END_MODULE_NAMESPACE
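The (now commented-out) current_sink assignment above flattens the four dilution indices (inoise, dk, dt, ds) into a single array index, with inoise running fastest. Below is a quick stand-alone check, not part of the commit, that this arithmetic covers [0, nnoise*LI*Nt_inv*Ns) exactly once; the sizes are the ones hard-coded in the diff and the check itself is generic.

// Stand-alone check of the dilution-index flattening inoise + nnoise*(dk + LI*(dt + Nt_inv*ds)).
#include <cassert>
#include <iostream>
#include <vector>

int main()
{
    const int nnoise = 1, LI = 6, Nt_inv = 1, Ns = 4; // values hard-coded in the diff
    const int total = nnoise * LI * Nt_inv * Ns;

    std::vector<int> hits(total, 0);
    for (int inoise = 0; inoise < nnoise; inoise++)
        for (int dk = 0; dk < LI; dk++)
            for (int dt = 0; dt < Nt_inv; dt++)
                for (int ds = 0; ds < Ns; ds++) {
                    int idx = inoise + nnoise * (dk + LI * (dt + Nt_inv * ds));
                    assert(idx >= 0 && idx < total);
                    hits[idx]++;
                }

    for (int i = 0; i < total; i++) assert(hits[i] == 1); // every slot hit exactly once
    std::cout << total << " dilution components map one-to-one onto the array" << std::endl;
    return 0;
}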