/*************************************************************************************
|
|
|
|
|
|
|
|
Grid physics library, www.github.com/paboyle/Grid
|
|
|
|
|
|
|
|
Source file: ./tests/Test_GaugeAction.cc
|
|
|
|
|
|
|
|
Copyright (C) 2015
|
|
|
|
|
|
|
|
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
|
|
|
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
|
|
|
Author: paboyle <paboyle@ph.ed.ac.uk>
|
|
|
|
|
|
|
|
This program is free software; you can redistribute it and/or modify
|
|
|
|
it under the terms of the GNU General Public License as published by
|
|
|
|
the Free Software Foundation; either version 2 of the License, or
|
|
|
|
(at your option) any later version.
|
|
|
|
|
|
|
|
This program is distributed in the hope that it will be useful,
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
GNU General Public License for more details.
|
|
|
|
|
|
|
|
You should have received a copy of the GNU General Public License along
|
|
|
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
|
|
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
|
|
|
|
|
|
See the full license in the file "LICENSE" in the top level distribution directory
|
|
|
|
*************************************************************************************/
/* END LEGAL */

#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
/* For Metropolis */
|
|
|
|
class Metropolis {
|
|
|
|
public:
|
|
|
|
GridSerialRNG & sRNG;
|
|
|
|
Metropolis(GridSerialRNG & _sRNG) : sRNG(_sRNG) {};
|
|
|
|
bool AcceptReject(const RealD Delta)
|
|
|
|
{
|
|
|
|
RealD rand;
|
|
|
|
|
|
|
|
if(Delta <=0.0) return true;
|
|
|
|
|
|
|
|
random(sRNG,rand);
|
|
|
|
if(rand <= exp(-Delta))
|
|
|
|
return true;
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
};
int main (int argc, char ** argv)
|
|
|
|
{
|
|
|
|
Grid_init(&argc,&argv);
|
|
|
|
|
|
|
|
|
2018-02-24 22:19:28 +00:00
|
|
|
Coordinate simd_layout = GridDefaultSimd(4,vComplex::Nsimd());
|
|
|
|
Coordinate mpi_layout = GridDefaultMpi();
|
|
|
|
Coordinate latt_size ({16,16,16,32});
|
|
|
|
Coordinate clatt_size ({4,4,4,8});
|
2015-06-05 10:15:31 +01:00
|
|
|
int orthodir=3;
|
|
|
|
int orthosz =latt_size[orthodir];
|
|
|
|
|
|
|
|
GridCartesian Fine(latt_size,simd_layout,mpi_layout);
|
|
|
|
GridCartesian Coarse(clatt_size,simd_layout,mpi_layout);
|
|
|
|
|
|
|
|
LatticeGaugeField Umu(&Fine);
|
|
|
|
|
|
|
|
std::vector<LatticeColourMatrix> U(4,&Fine);
|
|
|
|
|
2017-06-19 22:03:03 +01:00
|
|
|
FieldMetaData header;
|
2015-06-05 10:15:31 +01:00
|
|
|
|
|
|
|
std::string file("./ckpoint_lat.4000");
|
Binary IO file for generic Grid array parallel I/O.
Number of IO MPI tasks can be varied by selecting which
dimensions use parallel IO and which dimensions use Serial send to boss
I/O.
Thus can neck down from, say 1024 nodes = 4x4x8x8 to {1,8,32,64,128,256,1024} nodes
doing the I/O.
Interpolates nicely between ALL nodes write their data, a single boss per time-plane
in processor space [old UKQCD fortran code did this], and a single node doing all I/O.
Not sure I have the transfer sizes big enough and am not overly convinced fstream
is guaranteed to not give buffer inconsistencies unless I set streambuf size to zero.
Practically it has worked on 8 tasks, 2x1x2x2 writing /cloning NERSC configurations
on my MacOS + OpenMPI and Clang environment.
It is VERY easy to switch to pwrite at a later date, and also easy to send x-strips around from
each node in order to gather bigger chunks at the syscall level.
That would push us up to the circa 8x 18*4*8 == 4KB size write chunk, and by taking, say, x/y non
parallel we get to 16MB contiguous chunks written in multi 4KB transactions
per IOnode in 64^3 lattices for configuration I/O.
I suspect this is fine for system performance.
2015-08-26 13:40:29 +01:00
|
|
|
NerscIO::readConfiguration(Umu,header,file);
|
2015-06-05 10:15:31 +01:00
|
|
|
|
|
|
|
for(int mu=0;mu<Nd;mu++){
|
2015-06-30 15:01:26 +01:00
|
|
|
U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
|
2015-06-05 10:15:31 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Painful ; fix syntactical niceness : to check reader
|
|
|
|
LatticeComplex LinkTrace(&Fine);
|
2018-01-27 23:46:02 +00:00
|
|
|
LinkTrace=Zero();
|
2015-06-05 10:15:31 +01:00
|
|
|
for(int mu=0;mu<Nd;mu++){
|
|
|
|
LinkTrace = LinkTrace + trace(U[mu]);
|
|
|
|
}
|
|
|
|
|
|
|
|
// (1+2+3)=6 = N(N-1)/2 terms // equals to Double S Chorma
|
|
|
|
// in LatticeGaugeField out Plaq
|
|
|
|
// class WilsonLoop {
|
|
|
|
// RealD plaquette(LatticeGaugeField &Umu);
|
|
|
|
// void staple(LatticeSomethingorOther,LatticeGaugeField &Umu);
|
|
|
|
// RealD rectangle(LatticeGaugeField &Umu);
|
|
|
|
// LatticeComplex sitePlaquette()
|
|
|
|
// }
|
|
|
|
// covariantCshift ???
|
|
|
|
// GaugeActionBase
|
|
|
|
// GaugeActionPlaquette
|
|
|
|
// GaugeActionPlaquettePlusRectangle
|
|
|
|
// GaugeActionIwasaki
|
|
|
|
// GaugeActionSymanzik
|
|
|
|
// GaugeActionWilson
|
|
|
|
// Heatbath and quenched update.
|
|
|
|
//
|
|
|
|
LatticeColourMatrix tmpU(&Fine);
|
|
|
|
|
|
|
|
LatticeComplex Plaq(&Fine);
|
|
|
|
LatticeComplex cPlaq(&Coarse);
|
2018-01-27 23:46:02 +00:00
|
|
|
Plaq = Zero();
|
2015-06-05 10:15:31 +01:00
|
|
|
for(int mu=1;mu<Nd;mu++){
|
|
|
|
for(int nu=0;nu<mu;nu++){
|
2016-01-02 13:37:25 +00:00
|
|
|
Plaq = Plaq + trace(PeriodicBC::CovShiftForward(U[mu],mu,U[nu])*adj(PeriodicBC::CovShiftForward(U[nu],nu,U[mu])));
|
2015-06-05 10:15:31 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
double vol = Fine.gSites();
|
|
|
|
Complex PlaqScale(1.0/vol/6.0/3.0);
|
|
|
|
RealD StapScale(1.0/vol/6.0/3.0);
|
2015-07-23 17:31:13 +01:00
|
|
|
std::cout<<GridLogMessage <<"PlaqScale" << PlaqScale<<std::endl;
|
2015-06-05 10:15:31 +01:00
|
|
|
std::vector<TComplex> Plaq_T(orthosz);
|
|
|
|
sliceSum(Plaq,Plaq_T,Nd-1);
|
|
|
|
int Nt = Plaq_T.size();
|
|
|
|
|
|
|
|
|
|
|
|
TComplex Plaq_T_sum;
|
2018-01-27 23:46:02 +00:00
|
|
|
Plaq_T_sum=Zero();
|
2015-06-05 10:15:31 +01:00
|
|
|
for(int t=0;t<Nt;t++){
|
|
|
|
Plaq_T_sum = Plaq_T_sum+Plaq_T[t];
|
|
|
|
Complex Pt=TensorRemove(Plaq_T[t]);
|
2015-07-23 17:31:13 +01:00
|
|
|
std::cout<<GridLogMessage << "sliced ["<<t<<"]" <<Pt*PlaqScale*Real(Nt) << std::endl;
|
2015-06-05 10:15:31 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
Complex Pt = TensorRemove(Plaq_T_sum);
|
2015-07-23 17:31:13 +01:00
|
|
|
std::cout<<GridLogMessage << "total " <<Pt*PlaqScale<<std::endl;
|
2015-06-05 10:15:31 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
TComplex Tp = sum(Plaq);
|
|
|
|
Complex p = TensorRemove(Tp);
|
2015-07-23 17:31:13 +01:00
|
|
|
std::cout<<GridLogMessage << "calculated plaquettes " <<p*PlaqScale<<std::endl;
|
2015-06-05 10:15:31 +01:00
|
|
|
|
2015-06-05 10:29:42 +01:00
|
|
|
RealD avg_plaq = ColourWilsonLoops::avgPlaquette(Umu);
|
2015-07-23 17:31:13 +01:00
|
|
|
std::cout<<GridLogMessage << "NEW : calculated real plaquettes " <<avg_plaq<<std::endl;
|
2015-06-05 10:15:31 +01:00
|
|
|
|
|
|
|
RealD stap_plaq=0.0;
|
|
|
|
LatticeColourMatrix stap(&Fine);
|
|
|
|
LatticeComplex stap_tr(&Fine);
|
|
|
|
for(int mu=0;mu<Nd;mu++){
|
2015-06-05 10:29:42 +01:00
|
|
|
ColourWilsonLoops::Staple(stap,Umu,mu);
|
2015-12-28 16:38:31 +00:00
|
|
|
stap_tr = trace(stap*U[mu]);
|
2015-06-05 10:15:31 +01:00
|
|
|
TComplex Ts = sum(stap_tr);
|
|
|
|
Complex s = TensorRemove(Ts);
|
|
|
|
stap_plaq+=real(s);
|
|
|
|
}
|
2015-07-23 17:31:13 +01:00
|
|
|
std::cout<<GridLogMessage << "NEW : plaquette via staples"<< stap_plaq*StapScale*0.25<< std::endl;
|
2015-06-05 10:15:31 +01:00
|
|
|
Complex LinkTraceScale(1.0/vol/4.0/3.0);
|
|
|
|
TComplex Tl = sum(LinkTrace);
|
|
|
|
Complex l = TensorRemove(Tl);
|
2015-07-23 17:31:13 +01:00
|
|
|
std::cout<<GridLogMessage << "calculated link trace " <<l*LinkTraceScale<<std::endl;
|
2015-06-05 10:15:31 +01:00
|
|
|
|
2015-06-08 12:04:59 +01:00
|
|
|
blockSum(cPlaq,Plaq);
|
2015-06-05 10:15:31 +01:00
|
|
|
TComplex TcP = sum(cPlaq);
|
|
|
|
Complex ll= TensorRemove(TcP);
|
2015-07-23 17:31:13 +01:00
|
|
|
std::cout<<GridLogMessage << "coarsened plaquettes sum to " <<ll*PlaqScale<<std::endl;
|
2015-06-05 10:15:31 +01:00
|
|
|
|
|
|
|
|
|
|
|
Grid_finalize();
|
|
|
|
}