Small change in the HMC interface.
Example of multiple levels in the WilsonFermion hmc test.
Merge remote-tracking branch 'upstream/master'

Conflicts:
	lib/qcd/hmc/HMC.h
	lib/qcd/hmc/integrators/Integrator.h
	lib/qcd/hmc/integrators/Integrator_algorithm.h
	tests/Test_simd.cc
@@ -1,6 +1,13 @@
#ifndef GRID_ALIGNED_ALLOCATOR_H
#define GRID_ALIGNED_ALLOCATOR_H

#ifdef HAVE_MALLOC_MALLOC_H
#include <malloc/malloc.h>
#endif
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#include <immintrin.h>
#ifdef HAVE_MM_MALLOC_H
#include <mm_malloc.h>
@@ -1,5 +1,5 @@
/* lib/GridConfig.h. Generated from GridConfig.h.in by configure. */
/* lib/GridConfig.h.in. Generated from configure.ac by autoheader. */
/* lib/Config.h. Generated from Config.h.in by configure. */
/* lib/Config.h.in. Generated from configure.ac by autoheader. */

/* AVX Intrinsics */
/* #undef AVX1 */
@@ -34,9 +34,6 @@
/* Support AVX2 (Advanced Vector Extensions 2) instructions */
/* #undef HAVE_AVX2 */

/* define if the compiler supports basic C++11 syntax */
/* #undef HAVE_CXX11 */

/* Define to 1 if you have the declaration of `be64toh', and to 0 if you
   don't. */
#define HAVE_DECL_BE64TOH 1
@@ -1,4 +1,4 @@
/* lib/GridConfig.h.in. Generated from configure.ac by autoheader. */
/* lib/Config.h.in. Generated from configure.ac by autoheader. */

/* AVX Intrinsics */
#undef AVX1
@@ -33,9 +33,6 @@
/* Support AVX2 (Advanced Vector Extensions 2) instructions */
#undef HAVE_AVX2

/* define if the compiler supports basic C++11 syntax */
#undef HAVE_CXX11

/* Define to 1 if you have the declaration of `be64toh', and to 0 if you
   don't. */
#undef HAVE_DECL_BE64TOH
86  lib/Grid.h
@@ -6,92 +6,48 @@
// Copyright (c) 2014 University of Edinburgh. All rights reserved.
//

#ifndef GRID_H
#define GRID_H

///////////////////
// Std C++ dependencies
///////////////////
#include <cassert>

#include <complex>
#include <vector>

#include <iostream>
#include <iomanip>
#include <random>
#include <functional>

#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <stdio.h>
#include <signal.h>
#include <ctime>
#include <sys/time.h>
#include <chrono>

#ifndef MAX
#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)>(y)?(y):(x))
#endif

#define strong_inline __attribute__((always_inline)) inline

#include <GridConfig.h>

////////////////////////////////////////////////////////////
// Tunable header includes
////////////////////////////////////////////////////////////

#ifdef HAVE_MALLOC_MALLOC_H
#include <malloc/malloc.h>
#endif
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

///////////////////
// Grid headers
///////////////////
#include <MacroMagic.h>
#include <Config.h>
#include <Timer.h>
#include <Log.h>
#include <AlignedAllocator.h>

#include <Simd.h>
#include <Threads.h>

#include <Communicator.h> // subdir aggregate
#include <Cartesian.h>    // subdir aggregate
#include <Tensors.h>      // subdir aggregate
#include <Lattice.h>      // subdir aggregate
#include <Cshift.h>       // subdir aggregate
#include <Stencil.h>      // subdir aggregate
#include <Algorithms.h>   // subdir aggregate

#include <Communicator.h>
#include <Cartesian.h>
#include <Tensors.h>
#include <Lattice.h>
#include <Cshift.h>
#include <Stencil.h>
#include <Algorithms.h>
#include <qcd/QCD.h>
#include <parallelIO/NerscIO.h>

namespace Grid {
#include <Init.h>

void Grid_init(int *argc,char ***argv);
void Grid_finalize(void);
// internal, controled with --handle
void Grid_sa_signal_handler(int sig,siginfo_t *si,void * ptr);
void Grid_debug_handler_init(void);
void Grid_quiesce_nodes(void);
void Grid_unquiesce_nodes(void);

// C++11 time facilities better?
double usecond(void);

const std::vector<int> GridDefaultSimd(int dims,int nsimd);
const std::vector<int> &GridDefaultLatt(void);
const std::vector<int> &GridDefaultMpi(void);
const int &GridThreads(void) ;
void GridSetThreads(int t) ;

// Common parsing chores
std::string GridCmdOptionPayload(char ** begin, char ** end, const std::string & option);
bool GridCmdOptionExists(char** begin, char** end, const std::string& option);
std::string GridCmdVectorIntToString(const std::vector<int> & vec);

void GridParseLayout(char **argv,int argc,
                     std::vector<int> &latt,
                     std::vector<int> &simd,
                     std::vector<int> &mpi);

};

#endif
@@ -25,17 +25,19 @@

namespace Grid {

//////////////////////////////////////////////////////
// Convenience functions to access stadard command line arg
// driven parallelism controls
//////////////////////////////////////////////////////
static std::vector<int> Grid_default_latt;
static std::vector<int> Grid_default_mpi;
//////////////////////////////////////////////////////
// Convenience functions to access stadard command line arg
// driven parallelism controls
//////////////////////////////////////////////////////
static std::vector<int> Grid_default_latt;
static std::vector<int> Grid_default_mpi;
int GridThread::_threads;

int GridThread::_threads;

const std::vector<int> GridDefaultSimd(int dims,int nsimd)
{
const std::vector<int> &GridDefaultLatt(void) {return Grid_default_latt;};
const std::vector<int> &GridDefaultMpi(void) {return Grid_default_mpi;};
const std::vector<int> GridDefaultSimd(int dims,int nsimd)
{
std::vector<int> layout(dims);
int nn=nsimd;
for(int d=dims-1;d>=0;d--){
@@ -48,15 +50,11 @@ namespace Grid {
}
assert(nn==1);
return layout;
}

}

const std::vector<int> &GridDefaultLatt(void) {return Grid_default_latt;};
const std::vector<int> &GridDefaultMpi(void) {return Grid_default_mpi;};

////////////////////////////////////////////////////////////
// Command line parsing assist for stock controls
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Command line parsing assist for stock controls
////////////////////////////////////////////////////////////
std::string GridCmdOptionPayload(char ** begin, char ** end, const std::string & option)
{
char ** itr = std::find(begin, end, option);
@@ -70,6 +68,23 @@ bool GridCmdOptionExists(char** begin, char** end, const std::string& option)
{
return std::find(begin, end, option) != end;
}
// Comma separated list
void GridCmdOptionCSL(std::string str,std::vector<std::string> & vec)
{
size_t pos = 0;
std::string token;
std::string delimiter(",");

vec.resize(0);
while ((pos = str.find(delimiter)) != std::string::npos) {
token = str.substr(0, pos);
vec.push_back(token);
str.erase(0, pos + delimiter.length());
}
token = str;
vec.push_back(token);
return;
}

void GridCmdOptionIntVector(std::string &str,std::vector<int> & vec)
{
@@ -84,6 +99,7 @@ void GridCmdOptionIntVector(std::string &str,std::vector<int> & vec)
return;
}


void GridParseLayout(char **argv,int argc,
                     std::vector<int> &latt,
                     std::vector<int> &mpi)
@@ -117,8 +133,9 @@ std::string GridCmdVectorIntToString(const std::vector<int> & vec){
std::copy(vec.begin(), vec.end(),std::ostream_iterator<int>(oss, " "));
return oss.str();
}
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
//
/////////////////////////////////////////////////////////
void Grid_init(int *argc,char ***argv)
{
#ifdef GRID_COMMS_MPI
@@ -126,15 +143,33 @@ void Grid_init(int *argc,char ***argv)
#endif
// Parse command line args.

GridLogger::StopWatch.Start();

std::string arg;
std::vector<std::string> logstreams;
std::string defaultLog("Error,Warning,Message,Performance");

GridCmdOptionCSL(defaultLog,logstreams);
GridLogConfigure(logstreams);

if( GridCmdOptionExists(*argv,*argv+*argc,"--help") ){
std::cout<<"--help : this message"<<std::endl;
std::cout<<"--debug-signals : catch sigsegv and print a blame report"<<std::endl;
std::cout<<"--debug-stdout : print stdout from EVERY node"<<std::endl;
std::cout<<"--decomposition : report on default omp,mpi and simd decomposition"<<std::endl;
std::cout<<"--mpi n.n.n.n : default MPI decomposition"<<std::endl;
std::cout<<"--omp n : default number of OMP threads"<<std::endl;
std::cout<<"--grid n.n.n.n : default Grid size"<<std::endl;
std::cout<<GridLogMessage<<"--help : this message"<<std::endl;
std::cout<<GridLogMessage<<"--debug-signals : catch sigsegv and print a blame report"<<std::endl;
std::cout<<GridLogMessage<<"--debug-stdout : print stdout from EVERY node"<<std::endl;
std::cout<<GridLogMessage<<"--decomposition : report on default omp,mpi and simd decomposition"<<std::endl;
std::cout<<GridLogMessage<<"--mpi n.n.n.n : default MPI decomposition"<<std::endl;
std::cout<<GridLogMessage<<"--omp n : default number of OMP threads"<<std::endl;
std::cout<<GridLogMessage<<"--grid n.n.n.n : default Grid size"<<std::endl;
std::cout<<GridLogMessage<<"--log list : comma separted list of streams from Error,Warning,Message,Performance,Iterative,Debug"<<std::endl;
}

if( GridCmdOptionExists(*argv,*argv+*argc,"--log") ){
arg = GridCmdOptionPayload(*argv,*argv+*argc,"--log");
GridCmdOptionCSL(arg,logstreams);
GridLogConfigure(logstreams);
}

if( GridCmdOptionExists(*argv,*argv+*argc,"--debug-signals") ){
Grid_debug_handler_init();
}
@@ -152,38 +187,18 @@ void Grid_init(int *argc,char ***argv)
Grid_default_latt,
Grid_default_mpi);
if( GridCmdOptionExists(*argv,*argv+*argc,"--decomposition") ){
std::cout<<"Grid Decomposition\n";
std::cout<<"\tOpenMP threads : "<<GridThread::GetThreads()<<std::endl;
std::cout<<"\tMPI tasks : "<<GridCmdVectorIntToString(GridDefaultMpi())<<std::endl;
std::cout<<"\tvRealF : "<<sizeof(vRealF)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vRealF::Nsimd()))<<std::endl;
std::cout<<"\tvRealD : "<<sizeof(vRealD)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vRealD::Nsimd()))<<std::endl;
std::cout<<"\tvComplexF : "<<sizeof(vComplexF)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vComplexF::Nsimd()))<<std::endl;
std::cout<<"\tvComplexD : "<<sizeof(vComplexD)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vComplexD::Nsimd()))<<std::endl;
std::cout<<GridLogMessage<<"Grid Decomposition\n";
std::cout<<GridLogMessage<<"\tOpenMP threads : "<<GridThread::GetThreads()<<std::endl;
std::cout<<GridLogMessage<<"\tMPI tasks : "<<GridCmdVectorIntToString(GridDefaultMpi())<<std::endl;
std::cout<<GridLogMessage<<"\tvRealF : "<<sizeof(vRealF)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vRealF::Nsimd()))<<std::endl;
std::cout<<GridLogMessage<<"\tvRealD : "<<sizeof(vRealD)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vRealD::Nsimd()))<<std::endl;
std::cout<<GridLogMessage<<"\tvComplexF : "<<sizeof(vComplexF)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vComplexF::Nsimd()))<<std::endl;
std::cout<<GridLogMessage<<"\tvComplexD : "<<sizeof(vComplexD)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vComplexD::Nsimd()))<<std::endl;
}

}

////////////////////////////////////////////////////////////
// Verbose limiter on MPI tasks
////////////////////////////////////////////////////////////
void Grid_quiesce_nodes(void)
{
#ifdef GRID_COMMS_MPI
int me;
MPI_Comm_rank(MPI_COMM_WORLD,&me);
if ( me ) {
std::cout.setstate(std::ios::badbit);
}
#endif
}
void Grid_unquiesce_nodes(void)
{
#ifdef GRID_COMMS_MPI
std::cout.clear();
#endif
}

void Grid_finalize(void)
{
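
A minimal usage sketch (not part of this commit) of the comma-separated-list helper added above. The forward declaration is copied from the definition in the hunk; it assumes the program is linked against the Grid library.

#include <string>
#include <vector>
#include <iostream>

namespace Grid { void GridCmdOptionCSL(std::string str,std::vector<std::string> & vec); }

int main(void)
{
  std::vector<std::string> streams;
  // Same call Grid_init makes for the default "--log" stream list.
  Grid::GridCmdOptionCSL(std::string("Error,Warning,Message"), streams);
  for (auto &s : streams) std::cout << s << std::endl;   // Error / Warning / Message
  return 0;
}
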
32  lib/Init.h (new file)
@@ -0,0 +1,32 @@
#ifndef GRID_INIT_H
#define GRID_INIT_H

namespace Grid {

void Grid_init(int *argc,char ***argv);
void Grid_finalize(void);
// internal, controled with --handle
void Grid_sa_signal_handler(int sig,siginfo_t *si,void * ptr);
void Grid_debug_handler_init(void);
void Grid_quiesce_nodes(void);
void Grid_unquiesce_nodes(void);

const std::vector<int> GridDefaultSimd(int dims,int nsimd);
const std::vector<int> &GridDefaultLatt(void);
const std::vector<int> &GridDefaultMpi(void);
const int &GridThreads(void) ;
void GridSetThreads(int t) ;

// Common parsing chores
std::string GridCmdOptionPayload(char ** begin, char ** end, const std::string & option);
bool GridCmdOptionExists(char** begin, char** end, const std::string& option);
std::string GridCmdVectorIntToString(const std::vector<int> & vec);

void GridParseLayout(char **argv,int argc,
                     std::vector<int> &latt,
                     std::vector<int> &simd,
                     std::vector<int> &mpi);

};
#endif
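
A minimal driver sketch, assuming the library is built and <Grid.h> is on the include path; it only uses the entry points declared in Init.h above.

#include <Grid.h>

int main(int argc,char **argv)
{
  Grid::Grid_init(&argc,&argv);                       // parses --grid/--mpi/--omp/--log etc.
  const std::vector<int> &latt = Grid::GridDefaultLatt();
  std::cout << Grid::GridLogMessage << "lattice "
            << Grid::GridCmdVectorIntToString(latt) << std::endl;
  Grid::Grid_finalize();
  return 0;
}
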
62  lib/Log.cc (new file)
@@ -0,0 +1,62 @@
#include <Grid.h>

namespace Grid {

GridStopWatch GridLogger::StopWatch;
std::ostream GridLogger::devnull(0);

GridLogger GridLogError      (1,"Error");
GridLogger GridLogWarning    (1,"Warning");
GridLogger GridLogMessage    (1,"Message");
GridLogger GridLogDebug      (1,"Debug");
GridLogger GridLogPerformance(1,"Performance");
GridLogger GridLogIterative  (1,"Iterative");

void GridLogConfigure(std::vector<std::string> &logstreams)
{
GridLogError.Active(0);
GridLogWarning.Active(0);
GridLogMessage.Active(0);
GridLogIterative.Active(0);
GridLogDebug.Active(0);
GridLogPerformance.Active(0);

for(int i=0;i<logstreams.size();i++){
if ( logstreams[i]== std::string("Error") ) GridLogError.Active(1);
if ( logstreams[i]== std::string("Warning") ) GridLogWarning.Active(1);
if ( logstreams[i]== std::string("Message") ) GridLogMessage.Active(1);
if ( logstreams[i]== std::string("Iterative") ) GridLogIterative.Active(1);
if ( logstreams[i]== std::string("Debug") ) GridLogDebug.Active(1);
if ( logstreams[i]== std::string("Performance") ) GridLogPerformance.Active(1);
}
}

////////////////////////////////////////////////////////////
// Verbose limiter on MPI tasks
////////////////////////////////////////////////////////////
void Grid_quiesce_nodes(void)
{
#ifdef GRID_COMMS_MPI
int me;
MPI_Comm_rank(MPI_COMM_WORLD,&me);
if ( me ) {
std::cout.setstate(std::ios::badbit);
}
#endif
}

void Grid_unquiesce_nodes(void)
{
#ifdef GRID_COMMS_MPI
std::cout.clear();
#endif
}

std::ostream& operator<< (std::ostream& stream, const GridTime& time)
{
stream << time.count()<<" ms";
return stream;
}

}
46  lib/Log.h (new file)
@@ -0,0 +1,46 @@
#ifndef GRID_LOG_H
#define GRID_LOG_H
namespace Grid {

// Dress the output; use std::chrono for time stamping via the StopWatch class

std::ostream& operator<< (std::ostream& stream, const GridTime& time);

class GridLogger {
int active;
std::string name;
public:

static GridStopWatch StopWatch;
static std::ostream devnull;

GridLogger(int on, std::string nm): active(on), name(nm) {
};

void Active(int on) {active = on;};

friend std::ostream& operator<< (std::ostream& stream, const GridLogger& log){
if ( log.active ) {
StopWatch.Stop();
GridTime now = StopWatch.Elapsed();
StopWatch.Start();
stream << "Grid : "<<log.name << " : " << now << " : ";
return stream;
} else {
return devnull;
}
}

};

void GridLogConfigure(std::vector<std::string> &logstreams);

extern GridLogger GridLogError;
extern GridLogger GridLogWarning;
extern GridLogger GridLogMessage;
extern GridLogger GridLogDebug ;
extern GridLogger GridLogPerformance;
extern GridLogger GridLogIterative ;

}
#endif
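
A short sketch (not part of the commit) of how the logger classes above are meant to be used: streams not named in GridLogConfigure are routed to the devnull stream.

#include <Grid.h>
using namespace Grid;

int main(int argc,char **argv)
{
  Grid_init(&argc,&argv);

  std::vector<std::string> streams = {"Error","Warning","Message"};
  GridLogConfigure(streams);                            // activate only these streams

  std::cout << GridLogMessage << "visible, with a time stamp prefix" << std::endl;
  std::cout << GridLogDebug   << "silently discarded (Debug not enabled)" << std::endl;

  Grid_finalize();
  return 0;
}
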
78  lib/MacroMagic.h (new file)
@@ -0,0 +1,78 @@
#ifndef GRID_MACRO_MAGIC_H
#define GRID_MACRO_MAGIC_H

#define strong_inline __attribute__((always_inline)) inline

#ifndef MAX
#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)>(y)?(y):(x))
#endif

#define GRID_MACRO_FIRST(a, ...) a
#define GRID_MACRO_SECOND(a, b, ...) b

#define GRID_MACRO_EMPTY()

#define GRID_MACRO_EVAL(...)     GRID_MACRO_EVAL1024(__VA_ARGS__)
#define GRID_MACRO_EVAL1024(...) GRID_MACRO_EVAL512(GRID_MACRO_EVAL512(__VA_ARGS__))
#define GRID_MACRO_EVAL512(...)  GRID_MACRO_EVAL256(GRID_MACRO_EVAL256(__VA_ARGS__))
#define GRID_MACRO_EVAL256(...)  GRID_MACRO_EVAL128(GRID_MACRO_EVAL128(__VA_ARGS__))
#define GRID_MACRO_EVAL128(...)  GRID_MACRO_EVAL64(GRID_MACRO_EVAL64(__VA_ARGS__))
#define GRID_MACRO_EVAL64(...)   GRID_MACRO_EVAL32(GRID_MACRO_EVAL32(__VA_ARGS__))
#define GRID_MACRO_EVAL32(...)   GRID_MACRO_EVAL16(GRID_MACRO_EVAL16(__VA_ARGS__))
#define GRID_MACRO_EVAL16(...)   GRID_MACRO_EVAL8(GRID_MACRO_EVAL8(__VA_ARGS__))
#define GRID_MACRO_EVAL8(...)    GRID_MACRO_EVAL4(GRID_MACRO_EVAL4(__VA_ARGS__))
#define GRID_MACRO_EVAL4(...)    GRID_MACRO_EVAL2(GRID_MACRO_EVAL2(__VA_ARGS__))
#define GRID_MACRO_EVAL2(...)    GRID_MACRO_EVAL1(GRID_MACRO_EVAL1(__VA_ARGS__))
#define GRID_MACRO_EVAL1(...)    __VA_ARGS__

#define GRID_MACRO_DEFER1(m) m GRID_MACRO_EMPTY()
#define GRID_MACRO_DEFER2(m) m GRID_MACRO_EMPTY GRID_MACRO_EMPTY()()
#define GRID_MACRO_DEFER3(m) m GRID_MACRO_EMPTY GRID_MACRO_EMPTY GRID_MACRO_EMPTY()()()
#define GRID_MACRO_DEFER4(m) m GRID_MACRO_EMPTY GRID_MACRO_EMPTY GRID_MACRO_EMPTY GRID_MACRO_EMPTY()()()()

#define GRID_MACRO_IS_PROBE(...) GRID_MACRO_SECOND(__VA_ARGS__, 0)
#define GRID_MACRO_PROBE() ~, 1

#define GRID_MACRO_CAT(a,b) a ## b

#define GRID_MACRO_NOT(x) GRID_MACRO_IS_PROBE(GRID_MACRO_CAT(_GRID_MACRO_NOT_, x))
#define _GRID_MACRO_NOT_0 GRID_MACRO_PROBE()

#define GRID_MACRO_BOOL(x) GRID_MACRO_NOT(GRID_MACRO_NOT(x))

#define GRID_MACRO_IF_ELSE(condition) _GRID_MACRO_IF_ELSE(GRID_MACRO_BOOL(condition))
#define _GRID_MACRO_IF_ELSE(condition) GRID_MACRO_CAT(_GRID_MACRO_IF_, condition)

#define _GRID_MACRO_IF_1(...) __VA_ARGS__ _GRID_MACRO_IF_1_ELSE
#define _GRID_MACRO_IF_0(...)             _GRID_MACRO_IF_0_ELSE

#define _GRID_MACRO_IF_1_ELSE(...)
#define _GRID_MACRO_IF_0_ELSE(...) __VA_ARGS__

#define GRID_MACRO_HAS_ARGS(...) GRID_MACRO_BOOL(GRID_MACRO_FIRST(_GRID_MACRO_END_OF_ARGUMENTS_ __VA_ARGS__)())
#define _GRID_MACRO_END_OF_ARGUMENTS_() 0

#define GRID_MACRO_MAP(m, first, second, ...) \
  m(first,second) \
  GRID_MACRO_IF_ELSE(GRID_MACRO_HAS_ARGS(__VA_ARGS__))( \
    GRID_MACRO_DEFER4(_GRID_MACRO_MAP)()(m, __VA_ARGS__) \
  )( \
    /* Do nothing, just terminate */ \
  )
#define _GRID_MACRO_MAP() GRID_MACRO_MAP

#define GRID_MACRO_MEMBER(A,B) A B;

#define GRID_MACRO_OS_WRITE_MEMBER(A,B) os<< #A <<" "#B <<" = "<< obj. B <<" ; " <<std::endl;

#define GRID_DECL_CLASS_MEMBERS(cname,...) \
  GRID_MACRO_EVAL(GRID_MACRO_MAP(GRID_MACRO_MEMBER,__VA_ARGS__)) \
  friend std::ostream & operator << (std::ostream &os, const cname &obj ) { \
    os<<"class "<<#cname<<" {"<<std::endl;\
    GRID_MACRO_EVAL(GRID_MACRO_MAP(GRID_MACRO_OS_WRITE_MEMBER,__VA_ARGS__)) \
    os<<"}"; \
    return os;\
  };

#endif
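
A hedged sketch of what the macros above are for: GRID_DECL_CLASS_MEMBERS expands each (type,name) pair into a member declaration and into a generated operator<<. The class name and members below are invented for illustration only.

#include <iostream>
#include <MacroMagic.h>

class IntegratorParams {
public:
  GRID_DECL_CLASS_MEMBERS(IntegratorParams,
                          int,    Nsteps,
                          double, TrajectoryLength);
};

int main(void)
{
  IntegratorParams p;
  p.Nsteps = 20;
  p.TrajectoryLength = 1.0;
  std::cout << p << std::endl;   // prints "class IntegratorParams { int Nsteps = 20 ; ... }"
  return 0;
}
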
@@ -1,4 +1,4 @@

HFILES=./Cshift.h ./simd/Grid_avx.h ./simd/Grid_vector_types.h ./simd/Grid_sse4.h ./simd/Grid_avx512.h ./simd/Grid_empty.h ./simd/Grid_vector_unops.h ./simd/Grid_neon.h ./simd/Grid_qpx.h ./Tensors.h ./Algorithms.h ./communicator/Communicator_base.h ./lattice/Lattice_rng.h ./lattice/Lattice_reduction.h ./lattice/Lattice_transfer.h ./lattice/Lattice_unary.h ./lattice/Lattice_peekpoke.h ./lattice/Lattice_coordinate.h ./lattice/Lattice_comparison.h ./lattice/Lattice_overload.h ./lattice/Lattice_reality.h ./lattice/Lattice_local.h ./lattice/Lattice_conformable.h ./lattice/Lattice_where.h ./lattice/Lattice_comparison_utils.h ./lattice/Lattice_arith.h ./lattice/Lattice_base.h ./lattice/Lattice_ET.h ./lattice/Lattice_transpose.h ./lattice/Lattice_trace.h ./Stencil.h ./tensors/Tensor_arith_sub.h ./tensors/Tensor_exp.h ./tensors/Tensor_arith_mul.h ./tensors/Tensor_class.h ./tensors/Tensor_logical.h ./tensors/Tensor_transpose.h ./tensors/Tensor_arith_mac.h ./tensors/Tensor_arith_scalar.h ./tensors/Tensor_reality.h ./tensors/Tensor_trace.h ./tensors/Tensor_index.h ./tensors/Tensor_arith_add.h ./tensors/Tensor_outer.h ./tensors/Tensor_inner.h ./tensors/Tensor_traits.h ./tensors/Tensor_Ta.h ./tensors/Tensor_unary.h ./tensors/Tensor_determinant.h ./tensors/Tensor_arith.h ./tensors/Tensor_extract_merge.h ./Communicator.h ./Cartesian.h ./parallelIO/NerscIO.h ./qcd/QCD.h ./qcd/hmc/integrators/Integrator.h ./qcd/hmc/integrators/Integrator_algorithm.h ./qcd/hmc/HMC.h ./qcd/utils/SpaceTimeGrid.h ./qcd/utils/SUn.h ./qcd/utils/LinalgUtils.h ./qcd/utils/CovariantCshift.h ./qcd/utils/WilsonLoops.h ./qcd/action/ActionBase.h ./qcd/action/gauge/WilsonGaugeAction.h ./qcd/action/Actions.h ./qcd/action/fermion/CayleyFermion5D.h ./qcd/action/fermion/ScaledShamirFermion.h ./qcd/action/fermion/MobiusFermion.h ./qcd/action/fermion/OverlapWilsonContfracTanhFermion.h ./qcd/action/fermion/PartialFractionFermion5D.h ./qcd/action/fermion/ShamirZolotarevFermion.h ./qcd/action/fermion/FermionOperator.h ./qcd/action/fermion/WilsonFermion5D.h ./qcd/action/fermion/WilsonCompressor.h ./qcd/action/fermion/OverlapWilsonPartialFractionZolotarevFermion.h ./qcd/action/fermion/WilsonKernels.h ./qcd/action/fermion/DomainWallFermion.h ./qcd/action/fermion/OverlapWilsonPartialFractionTanhFermion.h ./qcd/action/fermion/OverlapWilsonContfracZolotarevFermion.h ./qcd/action/fermion/MobiusZolotarevFermion.h ./qcd/action/fermion/g5HermitianLinop.h ./qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h ./qcd/action/fermion/WilsonFermion.h ./qcd/action/fermion/ContinuedFractionFermion5D.h ./qcd/action/fermion/OverlapWilsonCayleyZolotarevFermion.h ./qcd/spin/TwoSpinor.h ./qcd/spin/Dirac.h ./cshift/Cshift_common.h ./cshift/Cshift_none.h ./cshift/Cshift_mpi.h ./Simd.h ./GridConfig.h ./cartesian/Cartesian_base.h ./cartesian/Cartesian_red_black.h ./cartesian/Cartesian_full.h ./AlignedAllocator.h ./Lattice.h ./Old/Tensor_poke.h ./Old/Tensor_peek.h ./Threads.h ./Grid.h ./algorithms/Preconditioner.h ./algorithms/iterative/ConjugateResidual.h ./algorithms/iterative/PrecGeneralisedConjugateResidual.h ./algorithms/iterative/ConjugateGradientMultiShift.h ./algorithms/iterative/SchurRedBlack.h ./algorithms/iterative/NormalEquations.h ./algorithms/iterative/ConjugateGradient.h ./algorithms/iterative/AdefGeneric.h ./algorithms/approx/Chebyshev.h ./algorithms/approx/Zolotarev.h ./algorithms/approx/MultiShiftFunction.h ./algorithms/approx/bigfloat.h ./algorithms/approx/bigfloat_double.h ./algorithms/approx/Remez.h ./algorithms/LinearOperator.h 
./algorithms/SparseMatrix.h ./algorithms/CoarsenedMatrix.h ./stencil/Lebesgue.h
HFILES=./Cshift.h ./simd/Grid_avx.h ./simd/Grid_vector_types.h ./simd/Grid_sse4.h ./simd/Grid_avx512.h ./simd/Grid_empty.h ./simd/Grid_vector_unops.h ./simd/Grid_neon.h ./simd/Grid_qpx.h ./Init.h ./Tensors.h ./Algorithms.h ./communicator/Communicator_base.h ./lattice/Lattice_rng.h ./lattice/Lattice_reduction.h ./lattice/Lattice_transfer.h ./lattice/Lattice_unary.h ./lattice/Lattice_peekpoke.h ./lattice/Lattice_coordinate.h ./lattice/Lattice_comparison.h ./lattice/Lattice_overload.h ./lattice/Lattice_reality.h ./lattice/Lattice_local.h ./lattice/Lattice_conformable.h ./lattice/Lattice_where.h ./lattice/Lattice_comparison_utils.h ./lattice/Lattice_arith.h ./lattice/Lattice_base.h ./lattice/Lattice_ET.h ./lattice/Lattice_transpose.h ./lattice/Lattice_trace.h ./Stencil.h ./tensors/Tensor_arith_sub.h ./tensors/Tensor_exp.h ./tensors/Tensor_arith_mul.h ./tensors/Tensor_class.h ./tensors/Tensor_logical.h ./tensors/Tensor_transpose.h ./tensors/Tensor_arith_mac.h ./tensors/Tensor_arith_scalar.h ./tensors/Tensor_reality.h ./tensors/Tensor_trace.h ./tensors/Tensor_index.h ./tensors/Tensor_arith_add.h ./tensors/Tensor_outer.h ./tensors/Tensor_inner.h ./tensors/Tensor_traits.h ./tensors/Tensor_Ta.h ./tensors/Tensor_unary.h ./tensors/Tensor_determinant.h ./tensors/Tensor_arith.h ./tensors/Tensor_extract_merge.h ./Communicator.h ./Cartesian.h ./parallelIO/NerscIO.h ./Timer.h ./qcd/QCD.h ./qcd/hmc/integrators/Integrator_base.h ./qcd/hmc/integrators/Integrator.h ./qcd/hmc/integrators/Integrator_algorithm.h ./qcd/hmc/HMC.h ./qcd/utils/SpaceTimeGrid.h ./qcd/utils/SUn.h ./qcd/utils/LinalgUtils.h ./qcd/utils/CovariantCshift.h ./qcd/utils/WilsonLoops.h ./qcd/action/ActionBase.h ./qcd/action/gauge/WilsonGaugeAction.h ./qcd/action/Actions.h ./qcd/action/pseudofermion/TwoFlavour.h ./qcd/action/fermion/CayleyFermion5D.h ./qcd/action/fermion/ScaledShamirFermion.h ./qcd/action/fermion/MobiusFermion.h ./qcd/action/fermion/OverlapWilsonContfracTanhFermion.h ./qcd/action/fermion/PartialFractionFermion5D.h ./qcd/action/fermion/ShamirZolotarevFermion.h ./qcd/action/fermion/FermionOperator.h ./qcd/action/fermion/WilsonFermion5D.h ./qcd/action/fermion/WilsonCompressor.h ./qcd/action/fermion/OverlapWilsonPartialFractionZolotarevFermion.h ./qcd/action/fermion/WilsonKernels.h ./qcd/action/fermion/DomainWallFermion.h ./qcd/action/fermion/OverlapWilsonPartialFractionTanhFermion.h ./qcd/action/fermion/OverlapWilsonContfracZolotarevFermion.h ./qcd/action/fermion/MobiusZolotarevFermion.h ./qcd/action/fermion/g5HermitianLinop.h ./qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h ./qcd/action/fermion/WilsonFermion.h ./qcd/action/fermion/ContinuedFractionFermion5D.h ./qcd/action/fermion/OverlapWilsonCayleyZolotarevFermion.h ./qcd/spin/TwoSpinor.h ./qcd/spin/Dirac.h ./Log.h ./cshift/Cshift_common.h ./cshift/Cshift_none.h ./cshift/Cshift_mpi.h ./Simd.h ./MacroMagic.h ./Config.h ./cartesian/Cartesian_base.h ./cartesian/Cartesian_red_black.h ./cartesian/Cartesian_full.h ./AlignedAllocator.h ./Lattice.h ./Old/Tensor_poke.h ./Old/Tensor_peek.h ./Threads.h ./Grid.h ./algorithms/Preconditioner.h ./algorithms/iterative/ConjugateResidual.h ./algorithms/iterative/PrecGeneralisedConjugateResidual.h ./algorithms/iterative/ConjugateGradientMultiShift.h ./algorithms/iterative/SchurRedBlack.h ./algorithms/iterative/NormalEquations.h ./algorithms/iterative/ConjugateGradient.h ./algorithms/iterative/AdefGeneric.h ./algorithms/approx/Chebyshev.h ./algorithms/approx/Zolotarev.h ./algorithms/approx/MultiShiftFunction.h ./algorithms/approx/bigfloat.h 
./algorithms/approx/bigfloat_double.h ./algorithms/approx/Remez.h ./algorithms/LinearOperator.h ./algorithms/SparseMatrix.h ./algorithms/CoarsenedMatrix.h ./stencil/Lebesgue.h
CCFILES=./qcd/hmc/integrators/Integrator.cc ./qcd/hmc/HMC.cc ./qcd/utils/SpaceTimeGrid.cc ./qcd/action/fermion/WilsonKernels.cc ./qcd/action/fermion/PartialFractionFermion5D.cc ./qcd/action/fermion/CayleyFermion5D.cc ./qcd/action/fermion/WilsonKernelsHand.cc ./qcd/action/fermion/WilsonFermion.cc ./qcd/action/fermion/ContinuedFractionFermion5D.cc ./qcd/action/fermion/WilsonFermion5D.cc ./qcd/spin/Dirac.cc ./GridInit.cc ./algorithms/approx/MultiShiftFunction.cc ./algorithms/approx/Remez.cc ./algorithms/approx/Zolotarev.cc ./stencil/Lebesgue.cc ./stencil/Stencil_common.cc
CCFILES=./qcd/hmc/integrators/Integrator.cc ./qcd/hmc/HMC.cc ./qcd/utils/SpaceTimeGrid.cc ./qcd/action/fermion/WilsonKernels.cc ./qcd/action/fermion/PartialFractionFermion5D.cc ./qcd/action/fermion/CayleyFermion5D.cc ./qcd/action/fermion/WilsonKernelsHand.cc ./qcd/action/fermion/WilsonFermion.cc ./qcd/action/fermion/ContinuedFractionFermion5D.cc ./qcd/action/fermion/WilsonFermion5D.cc ./qcd/spin/Dirac.cc ./Init.cc ./Log.cc ./algorithms/approx/MultiShiftFunction.cc ./algorithms/approx/Remez.cc ./algorithms/approx/Zolotarev.cc ./stencil/Lebesgue.cc ./stencil/Stencil_common.cc
52  lib/Timer.h (new file)
@@ -0,0 +1,52 @@
#ifndef GRID_TIME_H
#define GRID_TIME_H

#include <sys/time.h>
#include <ctime>
#include <chrono>

namespace Grid {

// Dress the output; use std::chrono

// C++11 time facilities better?
double usecond(void);

typedef std::chrono::system_clock GridClock;
typedef std::chrono::time_point<GridClock> GridTimePoint;
typedef std::chrono::milliseconds GridTime;

class GridStopWatch {
private:
bool running;
GridTimePoint start;
GridTime accumulator;
public:
GridStopWatch () {
Reset();
}
void Start(void) {
assert(running == false);
start = GridClock::now();
running = true;
}
void Stop(void) {
assert(running == true);
accumulator+= std::chrono::duration_cast<GridTime>(GridClock::now()-start);
running = false;
};
void Reset(void){
running = false;
start = GridClock::now();
accumulator = std::chrono::duration_cast<GridTime>(start-start);
}
GridTime Elapsed(void) {
assert(running == false);
return accumulator;
}
};

}
#endif
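
A small sketch (not part of the commit) of the stop watch in use; Elapsed() may only be called on a stopped timer because of the assert above, and GridTime prints through the operator<< defined in Log.cc.

#include <Grid.h>

int main(int argc,char **argv)
{
  Grid::Grid_init(&argc,&argv);

  Grid::GridStopWatch timer;
  timer.Start();
  // ... code to be timed ...
  timer.Stop();
  std::cout << Grid::GridLogMessage << "elapsed " << timer.Elapsed() << std::endl;

  Grid::Grid_finalize();
  return 0;
}
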
@ -32,12 +32,12 @@ namespace Grid {
|
||||
displacements[2*_d]=0;
|
||||
|
||||
//// report back
|
||||
std::cout<<"directions :";
|
||||
std::cout<<GridLogMessage<<"directions :";
|
||||
for(int d=0;d<npoint;d++) std::cout<< directions[d]<< " ";
|
||||
std::cout <<std::endl;
|
||||
std::cout<<"displacements :";
|
||||
std::cout<<GridLogMessage<<"displacements :";
|
||||
for(int d=0;d<npoint;d++) std::cout<< displacements[d]<< " ";
|
||||
std::cout <<std::endl;
|
||||
std::cout<<std::endl;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -100,9 +100,9 @@ namespace Grid {
|
||||
eProj._odata[ss](i)=CComplex(1.0);
|
||||
}
|
||||
eProj=eProj - iProj;
|
||||
std::cout<<"Orthog check error "<<i<<" " << norm2(eProj)<<std::endl;
|
||||
std::cout<<GridLogMessage<<"Orthog check error "<<i<<" " << norm2(eProj)<<std::endl;
|
||||
}
|
||||
std::cout <<"CheckOrthog done"<<std::endl;
|
||||
std::cout<<GridLogMessage <<"CheckOrthog done"<<std::endl;
|
||||
}
|
||||
void ProjectToSubspace(CoarseVector &CoarseVec,const FineField &FineVec){
|
||||
blockProject(CoarseVec,FineVec,subspace);
|
||||
@ -113,7 +113,7 @@ namespace Grid {
|
||||
void CreateSubspaceRandom(GridParallelRNG &RNG){
|
||||
for(int i=0;i<nbasis;i++){
|
||||
random(RNG,subspace[i]);
|
||||
std::cout<<" norm subspace["<<i<<"] "<<norm2(subspace[i])<<std::endl;
|
||||
std::cout<<GridLogMessage<<" norm subspace["<<i<<"] "<<norm2(subspace[i])<<std::endl;
|
||||
}
|
||||
Orthogonalise();
|
||||
}
|
||||
@ -121,7 +121,7 @@ namespace Grid {
|
||||
|
||||
RealD scale;
|
||||
|
||||
ConjugateGradient<FineField> CG(2.0e-3,10000);
|
||||
ConjugateGradient<FineField> CG(1.0e-2,10000);
|
||||
FineField noise(FineGrid);
|
||||
FineField Mn(FineGrid);
|
||||
|
||||
@ -131,7 +131,7 @@ namespace Grid {
|
||||
scale = std::pow(norm2(noise),-0.5);
|
||||
noise=noise*scale;
|
||||
|
||||
hermop.Op(noise,Mn); std::cout << "noise ["<<b<<"] <n|MdagM|n> "<<norm2(Mn)<<std::endl;
|
||||
hermop.Op(noise,Mn); std::cout<<GridLogMessage << "noise ["<<b<<"] <n|MdagM|n> "<<norm2(Mn)<<std::endl;
|
||||
|
||||
for(int i=0;i<1;i++){
|
||||
|
||||
@ -143,7 +143,7 @@ namespace Grid {
|
||||
|
||||
}
|
||||
|
||||
hermop.Op(noise,Mn); std::cout << "filtered["<<b<<"] <f|MdagM|f> "<<norm2(Mn)<<std::endl;
|
||||
hermop.Op(noise,Mn); std::cout<<GridLogMessage << "filtered["<<b<<"] <f|MdagM|f> "<<norm2(Mn)<<std::endl;
|
||||
subspace[b] = noise;
|
||||
|
||||
}
|
||||
@ -189,7 +189,7 @@ namespace Grid {
|
||||
SimpleCompressor<siteVector> compressor;
|
||||
Stencil.HaloExchange(in,comm_buf,compressor);
|
||||
|
||||
//PARALLEL_FOR_LOOP
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int ss=0;ss<Grid()->oSites();ss++){
|
||||
siteVector res = zero;
|
||||
siteVector nbr;
|
||||
@ -252,10 +252,6 @@ namespace Grid {
|
||||
|
||||
// Orthogonalise the subblocks over the basis
|
||||
blockOrthogonalise(InnerProd,Subspace.subspace);
|
||||
//Subspace.Orthogonalise();
|
||||
// Subspace.CheckOrthogonal();
|
||||
//Subspace.Orthogonalise();
|
||||
// Subspace.CheckOrthogonal();
|
||||
|
||||
// Compute the matrix elements of linop between this orthonormal
|
||||
// set of vectors.
|
||||
@ -306,6 +302,7 @@ namespace Grid {
|
||||
Subspace.ProjectToSubspace(oProj,oblock);
|
||||
// blockProject(iProj,iblock,Subspace.subspace);
|
||||
// blockProject(oProj,oblock,Subspace.subspace);
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int ss=0;ss<Grid()->oSites();ss++){
|
||||
for(int j=0;j<nbasis;j++){
|
||||
if( disp!= 0 ) {
|
||||
@ -321,12 +318,12 @@ namespace Grid {
|
||||
///////////////////////////
|
||||
// test code worth preserving in if block
|
||||
///////////////////////////
|
||||
std::cout<< " Computed matrix elements "<< self_stencil <<std::endl;
|
||||
std::cout<<GridLogMessage<< " Computed matrix elements "<< self_stencil <<std::endl;
|
||||
for(int p=0;p<geom.npoint;p++){
|
||||
std::cout<< "A["<<p<<"]" << std::endl;
|
||||
std::cout<< A[p] << std::endl;
|
||||
std::cout<<GridLogMessage<< "A["<<p<<"]" << std::endl;
|
||||
std::cout<<GridLogMessage<< A[p] << std::endl;
|
||||
}
|
||||
std::cout<< " picking by block0 "<< self_stencil <<std::endl;
|
||||
std::cout<<GridLogMessage<< " picking by block0 "<< self_stencil <<std::endl;
|
||||
|
||||
phi=Subspace.subspace[0];
|
||||
std::vector<int> bc(FineGrid->_ndimension,0);
|
||||
@ -334,9 +331,9 @@ namespace Grid {
|
||||
blockPick(Grid(),phi,tmp,bc); // Pick out a block
|
||||
linop.Op(tmp,Mphi); // Apply big dop
|
||||
blockProject(iProj,Mphi,Subspace.subspace); // project it and print it
|
||||
std::cout<< " Computed matrix elements from block zero only "<<std::endl;
|
||||
std::cout<< iProj <<std::endl;
|
||||
std::cout<<"Computed Coarse Operator"<<std::endl;
|
||||
std::cout<<GridLogMessage<< " Computed matrix elements from block zero only "<<std::endl;
|
||||
std::cout<<GridLogMessage<< iProj <<std::endl;
|
||||
std::cout<<GridLogMessage<<"Computed Coarse Operator"<<std::endl;
|
||||
#endif
|
||||
// ForceHermitian();
|
||||
AssertHermitian();
|
||||
@ -345,9 +342,9 @@ namespace Grid {
|
||||
void ForceDiagonal(void) {
|
||||
|
||||
|
||||
std::cout<<"**************************************************"<<std::endl;
|
||||
std::cout<<"**** Forcing coarse operator to be diagonal ****"<<std::endl;
|
||||
std::cout<<"**************************************************"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"**************************************************"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"**** Forcing coarse operator to be diagonal ****"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"**************************************************"<<std::endl;
|
||||
for(int p=0;p<8;p++){
|
||||
A[p]=zero;
|
||||
}
|
||||
@ -387,13 +384,13 @@ namespace Grid {
|
||||
|
||||
Diff = AA - adj(AAc);
|
||||
|
||||
std::cout<<"Norm diff dim "<<d<<" "<< norm2(Diff)<<std::endl;
|
||||
std::cout<<"Norm dim "<<d<<" "<< norm2(AA)<<std::endl;
|
||||
std::cout<<GridLogMessage<<"Norm diff dim "<<d<<" "<< norm2(Diff)<<std::endl;
|
||||
std::cout<<GridLogMessage<<"Norm dim "<<d<<" "<< norm2(AA)<<std::endl;
|
||||
|
||||
}
|
||||
Diff = A[8] - adj(A[8]);
|
||||
std::cout<<"Norm diff local "<< norm2(Diff)<<std::endl;
|
||||
std::cout<<"Norm local "<< norm2(A[8])<<std::endl;
|
||||
std::cout<<GridLogMessage<<"Norm diff local "<< norm2(Diff)<<std::endl;
|
||||
std::cout<<GridLogMessage<<"Norm local "<< norm2(A[8])<<std::endl;
|
||||
}
|
||||
|
||||
};
|
||||
|
@ -120,7 +120,7 @@ namespace Grid {
|
||||
Field *Tn = &T1;
|
||||
Field *Tnp = &T2;
|
||||
|
||||
std::cout << "Chebyshev ["<<lo<<","<<hi<<"]"<< " order "<<order <<std::endl;
|
||||
std::cout<<GridLogMessage << "Chebyshev ["<<lo<<","<<hi<<"]"<< " order "<<order <<std::endl;
|
||||
// Tn=T1 = (xscale M + mscale)in
|
||||
double xscale = 2.0/(hi-lo);
|
||||
double mscale = -(hi+lo)/(hi-lo);
|
||||
|
@ -1,6 +1,8 @@
|
||||
#ifndef MULTI_SHIFT_FUNCTION
|
||||
#define MULTI_SHIFT_FUNCTION
|
||||
|
||||
namespace Grid {
|
||||
|
||||
class MultiShiftFunction {
|
||||
public:
|
||||
int order;
|
||||
|
@ -757,3 +757,4 @@ void AlgRemez::csv(std::ostream & os)
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -15,6 +15,8 @@
|
||||
#ifndef INCLUDED_ALG_REMEZ_H
|
||||
#define INCLUDED_ALG_REMEZ_H
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#include <algorithms/approx/bigfloat.h>
|
||||
|
||||
#define JMAX 10000 //Maximum number of iterations of Newton's approximation
|
||||
@ -28,6 +30,7 @@
|
||||
remez.getIPFE(res,pole,&norm);
|
||||
remez.csv(ostream &os);
|
||||
*/
|
||||
|
||||
class AlgRemez
|
||||
{
|
||||
private:
|
||||
|
@ -149,7 +149,7 @@ class TwoLevelFlexiblePcg : public LinearFunction<Field>
|
||||
}
|
||||
|
||||
RealD rrn=sqrt(rn/ssq);
|
||||
std::cout<<"TwoLevelfPcg: k= "<<k<<" residual = "<<rrn<<std::endl;
|
||||
std::cout<<GridLogMessage<<"TwoLevelfPcg: k= "<<k<<" residual = "<<rrn<<std::endl;
|
||||
|
||||
// Stopping condition
|
||||
if ( rn <= rsq ) {
|
||||
@ -161,8 +161,8 @@ class TwoLevelFlexiblePcg : public LinearFunction<Field>
|
||||
RealD srcnorm = sqrt(norm2(src));
|
||||
RealD tmpnorm = sqrt(norm2(tmp));
|
||||
RealD true_residual = tmpnorm/srcnorm;
|
||||
std::cout<<"TwoLevelfPcg: true residual is "<<true_residual<<std::endl;
|
||||
std::cout<<"TwoLevelfPcg: target residual was"<<Tolerance<<std::endl;
|
||||
std::cout<<GridLogMessage<<"TwoLevelfPcg: true residual is "<<true_residual<<std::endl;
|
||||
std::cout<<GridLogMessage<<"TwoLevelfPcg: target residual was"<<Tolerance<<std::endl;
|
||||
return k;
|
||||
}
|
||||
}
|
||||
|
@ -13,9 +13,7 @@ namespace Grid {
|
||||
public:
|
||||
RealD Tolerance;
|
||||
Integer MaxIterations;
|
||||
int verbose;
|
||||
ConjugateGradient(RealD tol,Integer maxit) : Tolerance(tol), MaxIterations(maxit) {
|
||||
verbose=0;
|
||||
};
|
||||
|
||||
|
||||
@ -42,14 +40,12 @@ public:
|
||||
cp =a;
|
||||
ssq=norm2(src);
|
||||
|
||||
if ( verbose ) {
|
||||
std::cout <<std::setprecision(4)<< "ConjugateGradient: guess "<<guess<<std::endl;
|
||||
std::cout <<std::setprecision(4)<< "ConjugateGradient: src "<<ssq <<std::endl;
|
||||
std::cout <<std::setprecision(4)<< "ConjugateGradient: mp "<<d <<std::endl;
|
||||
std::cout <<std::setprecision(4)<< "ConjugateGradient: mmp "<<b <<std::endl;
|
||||
std::cout <<std::setprecision(4)<< "ConjugateGradient: cp,r "<<cp <<std::endl;
|
||||
std::cout <<std::setprecision(4)<< "ConjugateGradient: p "<<a <<std::endl;
|
||||
}
|
||||
std::cout<<GridLogIterative <<std::setprecision(4)<< "ConjugateGradient: guess "<<guess<<std::endl;
|
||||
std::cout<<GridLogIterative <<std::setprecision(4)<< "ConjugateGradient: src "<<ssq <<std::endl;
|
||||
std::cout<<GridLogIterative <<std::setprecision(4)<< "ConjugateGradient: mp "<<d <<std::endl;
|
||||
std::cout<<GridLogIterative <<std::setprecision(4)<< "ConjugateGradient: mmp "<<b <<std::endl;
|
||||
std::cout<<GridLogIterative <<std::setprecision(4)<< "ConjugateGradient: cp,r "<<cp <<std::endl;
|
||||
std::cout<<GridLogIterative <<std::setprecision(4)<< "ConjugateGradient: p "<<a <<std::endl;
|
||||
|
||||
RealD rsq = Tolerance* Tolerance*ssq;
|
||||
|
||||
@ -58,7 +54,7 @@ public:
|
||||
return;
|
||||
}
|
||||
|
||||
if(verbose) std::cout << std::setprecision(4)<< "ConjugateGradient: k=0 residual "<<cp<<" rsq"<<rsq<<std::endl;
|
||||
std::cout<<GridLogIterative << std::setprecision(4)<< "ConjugateGradient: k=0 residual "<<cp<<" rsq"<<rsq<<std::endl;
|
||||
|
||||
int k;
|
||||
for (k=1;k<=MaxIterations;k++){
|
||||
@ -80,7 +76,7 @@ public:
|
||||
psi= a*p+psi;
|
||||
p = p*b+r;
|
||||
|
||||
if (verbose) std::cout<<"ConjugateGradient: Iteration " <<k<<" residual "<<cp<< " target"<< rsq<<std::endl;
|
||||
std::cout<<GridLogIterative<<"ConjugateGradient: Iteration " <<k<<" residual "<<cp<< " target"<< rsq<<std::endl;
|
||||
|
||||
// Stopping condition
|
||||
if ( cp <= rsq ) {
|
||||
@ -94,14 +90,14 @@ public:
|
||||
RealD resnorm = sqrt(norm2(p));
|
||||
RealD true_residual = resnorm/srcnorm;
|
||||
|
||||
std::cout<<"ConjugateGradient: Converged on iteration " <<k
|
||||
std::cout<<GridLogMessage<<"ConjugateGradient: Converged on iteration " <<k
|
||||
<<" computed residual "<<sqrt(cp/ssq)
|
||||
<<" true residual "<<true_residual
|
||||
<<" target "<<Tolerance<<std::endl;
|
||||
return;
|
||||
}
|
||||
}
|
||||
std::cout<<"ConjugateGradient did NOT converge"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"ConjugateGradient did NOT converge"<<std::endl;
|
||||
assert(0);
|
||||
}
|
||||
};
|
||||
|
@ -91,7 +91,7 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
|
||||
cp = norm2(src);
|
||||
for(int s=0;s<nshift;s++){
|
||||
rsq[s] = cp * mresidual[s] * mresidual[s];
|
||||
std::cout<<"ConjugateGradientMultiShift: shift "<<s
|
||||
std::cout<<GridLogMessage<<"ConjugateGradientMultiShift: shift "<<s
|
||||
<<" target resid "<<rsq[s]<<std::endl;
|
||||
ps[s] = src;
|
||||
}
|
||||
@ -109,7 +109,7 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
|
||||
// p and mmp is equal to d after this since
|
||||
// the d computation is tricky
|
||||
// qq = real(innerProduct(p,mmp));
|
||||
// std::cout << "debug equal ? qq "<<qq<<" d "<< d<<std::endl;
|
||||
// std::cout<<GridLogMessage << "debug equal ? qq "<<qq<<" d "<< d<<std::endl;
|
||||
|
||||
b = -cp /d;
|
||||
|
||||
@ -214,7 +214,7 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
|
||||
|
||||
if(css<rsq[s]){
|
||||
if ( ! converged[s] )
|
||||
std::cout<<"ConjugateGradientMultiShift k="<<k<<" Shift "<<s<<" has converged"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"ConjugateGradientMultiShift k="<<k<<" Shift "<<s<<" has converged"<<std::endl;
|
||||
converged[s]=1;
|
||||
} else {
|
||||
all_converged=0;
|
||||
@ -225,8 +225,8 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
|
||||
|
||||
if ( all_converged ){
|
||||
|
||||
std::cout<< "CGMultiShift: All shifts have converged iteration "<<k<<std::endl;
|
||||
std::cout<< "CGMultiShift: Checking solutions"<<std::endl;
|
||||
std::cout<<GridLogMessage<< "CGMultiShift: All shifts have converged iteration "<<k<<std::endl;
|
||||
std::cout<<GridLogMessage<< "CGMultiShift: Checking solutions"<<std::endl;
|
||||
|
||||
// Check answers
|
||||
for(int s=0; s < nshift; s++) {
|
||||
@ -235,13 +235,13 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
|
||||
axpy(r,-alpha[s],src,tmp);
|
||||
RealD rn = norm2(r);
|
||||
RealD cn = norm2(src);
|
||||
std::cout<<"CGMultiShift: shift["<<s<<"] true residual "<<std::sqrt(rn/cn)<<std::endl;
|
||||
std::cout<<GridLogMessage<<"CGMultiShift: shift["<<s<<"] true residual "<<std::sqrt(rn/cn)<<std::endl;
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
// ugly hack
|
||||
std::cout<<"CG multi shift did not converge"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"CG multi shift did not converge"<<std::endl;
|
||||
assert(0);
|
||||
}
|
||||
|
||||
|
@ -41,7 +41,7 @@ namespace Grid {
|
||||
ssq=norm2(src);
|
||||
rsq=Tolerance*Tolerance*ssq;
|
||||
|
||||
if (verbose) std::cout<<"ConjugateResidual: iteration " <<0<<" residual "<<cp<< " target"<< rsq<<std::endl;
|
||||
if (verbose) std::cout<<GridLogMessage<<"ConjugateResidual: iteration " <<0<<" residual "<<cp<< " target"<< rsq<<std::endl;
|
||||
|
||||
for(int k=1;k<MaxIterations;k++){
|
||||
|
||||
@ -60,13 +60,13 @@ namespace Grid {
|
||||
axpy(p,b,p,r);
|
||||
pAAp=axpy_norm(Ap,b,Ap,Ar);
|
||||
|
||||
if(verbose) std::cout<<"ConjugateResidual: iteration " <<k<<" residual "<<cp<< " target"<< rsq<<std::endl;
|
||||
if(verbose) std::cout<<GridLogMessage<<"ConjugateResidual: iteration " <<k<<" residual "<<cp<< " target"<< rsq<<std::endl;
|
||||
|
||||
if(cp<rsq) {
|
||||
Linop.HermOp(psi,Ap);
|
||||
axpy(r,-1.0,src,Ap);
|
||||
RealD true_resid = norm2(r)/ssq;
|
||||
std::cout<<"ConjugateResidual: Converged on iteration " <<k
|
||||
std::cout<<GridLogMessage<<"ConjugateResidual: Converged on iteration " <<k
|
||||
<< " computed residual "<<sqrt(cp/ssq)
|
||||
<< " true residual "<<sqrt(true_resid)
|
||||
<< " target " <<Tolerance <<std::endl;
|
||||
@ -75,7 +75,7 @@ namespace Grid {
|
||||
|
||||
}
|
||||
|
||||
std::cout<<"ConjugateResidual did NOT converge"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"ConjugateResidual did NOT converge"<<std::endl;
|
||||
assert(0);
|
||||
}
|
||||
};
|
||||
|
@ -45,13 +45,13 @@ namespace Grid {
|
||||
|
||||
cp=GCRnStep(Linop,src,psi,rsq);
|
||||
|
||||
if ( verbose ) std::cout<<"VPGCR("<<mmax<<","<<nstep<<") "<< steps <<" steps cp = "<<cp<<std::endl;
|
||||
if ( verbose ) std::cout<<GridLogMessage<<"VPGCR("<<mmax<<","<<nstep<<") "<< steps <<" steps cp = "<<cp<<std::endl;
|
||||
|
||||
if(cp<rsq) {
|
||||
Linop.HermOp(psi,r);
|
||||
axpy(r,-1.0,src,r);
|
||||
RealD tr = norm2(r);
|
||||
std::cout<<"PrecGeneralisedConjugateResidual: Converged on iteration " <<steps
|
||||
std::cout<<GridLogMessage<<"PrecGeneralisedConjugateResidual: Converged on iteration " <<steps
|
||||
<< " computed residual "<<sqrt(cp/ssq)
|
||||
<< " true residual " <<sqrt(tr/ssq)
|
||||
<< " target " <<Tolerance <<std::endl;
|
||||
@ -59,7 +59,7 @@ namespace Grid {
|
||||
}
|
||||
|
||||
}
|
||||
std::cout<<"Variable Preconditioned GCR did not converge"<<std::endl;
|
||||
std::cout<<GridLogMessage<<"Variable Preconditioned GCR did not converge"<<std::endl;
|
||||
assert(0);
|
||||
}
|
||||
RealD GCRnStep(LinearOperatorBase<Field> &Linop,const Field &src, Field &psi,RealD rsq){
|
||||
@ -96,21 +96,21 @@ namespace Grid {
|
||||
/////////////////////
|
||||
Preconditioner(r,z);
|
||||
|
||||
std::cout<< " Preconditioner in " << norm2(r)<<std::endl;
|
||||
std::cout<< " Preconditioner out " << norm2(z)<<std::endl;
|
||||
std::cout<<GridLogMessage<< " Preconditioner in " << norm2(r)<<std::endl;
|
||||
std::cout<<GridLogMessage<< " Preconditioner out " << norm2(z)<<std::endl;
|
||||
|
||||
Linop.HermOp(z,tmp);
|
||||
|
||||
std::cout<< " Preconditioner Aout " << norm2(tmp)<<std::endl;
|
||||
std::cout<<GridLogMessage<< " Preconditioner Aout " << norm2(tmp)<<std::endl;
|
||||
ttmp=tmp;
|
||||
tmp=tmp-r;
|
||||
|
||||
std::cout<< " Preconditioner resid " << std::sqrt(norm2(tmp)/norm2(r))<<std::endl;
|
||||
std::cout<<GridLogMessage<< " Preconditioner resid " << std::sqrt(norm2(tmp)/norm2(r))<<std::endl;
|
||||
/*
|
||||
std::cout<<r<<std::endl;
|
||||
std::cout<<z<<std::endl;
|
||||
std::cout<<ttmp<<std::endl;
|
||||
std::cout<<tmp<<std::endl;
|
||||
std::cout<<GridLogMessage<<r<<std::endl;
|
||||
std::cout<<GridLogMessage<<z<<std::endl;
|
||||
std::cout<<GridLogMessage<<ttmp<<std::endl;
|
||||
std::cout<<GridLogMessage<<tmp<<std::endl;
|
||||
*/
|
||||
|
||||
Linop.HermOpAndNorm(z,Az,zAz,zAAz);
|
||||
@ -137,7 +137,7 @@ namespace Grid {
|
||||
|
||||
cp = axpy_norm(r,-a,q[peri_k],r);
|
||||
|
||||
std::cout<< " VPGCR_step resid" <<sqrt(cp/rsq)<<std::endl;
|
||||
std::cout<<GridLogMessage<< " VPGCR_step resid" <<sqrt(cp/rsq)<<std::endl;
|
||||
if((k==nstep-1)||(cp<rsq)){
|
||||
return cp;
|
||||
}
|
||||
@ -148,7 +148,7 @@ namespace Grid {
|
||||
|
||||
Linop.HermOp(z,tmp);
|
||||
tmp=tmp-r;
|
||||
std::cout<< " Preconditioner resid" <<sqrt(norm2(tmp)/norm2(r))<<std::endl;
|
||||
std::cout<<GridLogMessage<< " Preconditioner resid" <<sqrt(norm2(tmp)/norm2(r))<<std::endl;
|
||||
|
||||
q[peri_kp]=Az;
|
||||
p[peri_kp]=z;
|
||||
|
@ -89,7 +89,7 @@ namespace Grid {
|
||||
//////////////////////////////////////////////////////////////
|
||||
// Call the red-black solver
|
||||
//////////////////////////////////////////////////////////////
|
||||
std::cout << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl;
|
||||
std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl;
|
||||
_HermitianRBSolver(_HermOpEO,src_o,sol_o); assert(sol_o.checkerboard==Odd);
|
||||
|
||||
///////////////////////////////////////////////////
|
||||
@ -108,7 +108,7 @@ namespace Grid {
|
||||
RealD ns = norm2(in);
|
||||
RealD nr = norm2(resid);
|
||||
|
||||
std::cout << "SchurRedBlackDiagMooee solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl;
|
||||
std::cout<<GridLogMessage << "SchurRedBlackDiagMooee solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl;
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -132,18 +132,18 @@ inline void CBFromExpression(int &cb,const T1& lat) // Lattice leaf
|
||||
assert(cb==lat.checkerboard);
|
||||
}
|
||||
cb=lat.checkerboard;
|
||||
// std::cout<<"Lattice leaf cb "<<cb<<std::endl;
|
||||
// std::cout<<GridLogMessage<<"Lattice leaf cb "<<cb<<std::endl;
|
||||
}
|
||||
template<class T1,typename std::enable_if<!is_lattice<T1>::value, T1>::type * = nullptr >
|
||||
inline void CBFromExpression(int &cb,const T1& notlat) // non-lattice leaf
|
||||
{
|
||||
// std::cout<<"Non lattice leaf cb"<<cb<<std::endl;
|
||||
// std::cout<<GridLogMessage<<"Non lattice leaf cb"<<cb<<std::endl;
|
||||
}
|
||||
template <typename Op, typename T1>
|
||||
inline void CBFromExpression(int &cb,const LatticeUnaryExpression<Op,T1 > &expr)
|
||||
{
|
||||
CBFromExpression(cb,std::get<0>(expr.second));// recurse
|
||||
// std::cout<<"Unary node cb "<<cb<<std::endl;
|
||||
// std::cout<<GridLogMessage<<"Unary node cb "<<cb<<std::endl;
|
||||
}
|
||||
|
||||
template <typename Op, typename T1, typename T2>
|
||||
@ -151,7 +151,7 @@ inline void CBFromExpression(int &cb,const LatticeBinaryExpression<Op,T1,T2> &ex
|
||||
{
|
||||
CBFromExpression(cb,std::get<0>(expr.second));// recurse
|
||||
CBFromExpression(cb,std::get<1>(expr.second));
|
||||
// std::cout<<"Binary node cb "<<cb<<std::endl;
|
||||
// std::cout<<GridLogMessage<<"Binary node cb "<<cb<<std::endl;
|
||||
}
|
||||
template <typename Op, typename T1, typename T2, typename T3>
|
||||
inline void CBFromExpression( int &cb,const LatticeTrinaryExpression<Op,T1,T2,T3 > &expr)
|
||||
@ -159,7 +159,7 @@ inline void CBFromExpression( int &cb,const LatticeTrinaryExpression<Op,T1,T2,T3
|
||||
CBFromExpression(cb,std::get<0>(expr.second));// recurse
|
||||
CBFromExpression(cb,std::get<1>(expr.second));
|
||||
CBFromExpression(cb,std::get<2>(expr.second));
|
||||
// std::cout<<"Trinary node cb "<<cb<<std::endl;
|
||||
// std::cout<<GridLogMessage<<"Trinary node cb "<<cb<<std::endl;
|
||||
}
|
||||
|
||||
////////////////////////////////////////////
|
||||
@ -370,7 +370,7 @@ using namespace Grid;
|
||||
tmp.func(eval(0,v1),eval(0,v2));
|
||||
|
||||
auto var = v1+v2;
|
||||
std::cout<<typeid(var).name()<<std::endl;
|
||||
std::cout<<GridLogMessage<<typeid(var).name()<<std::endl;
|
||||
|
||||
v3=v1+v2;
|
||||
v3=v1+v2+v1*v2;
|
||||
|
@ -60,6 +60,11 @@ public:
|
||||
GridBase *_grid;
|
||||
int checkerboard;
|
||||
std::vector<vobj,alignedAllocator<vobj> > _odata;
|
||||
|
||||
// to pthread need a computable loop where loop induction is not required
|
||||
int begin(void) { return 0;};
|
||||
int end(void) { return _odata.size(); }
|
||||
vobj & operator[](int i) { return _odata[i]; };
|
||||
|
||||
public:
|
||||
typedef typename vobj::scalar_type scalar_type;
|
||||
@ -221,7 +226,7 @@ PARALLEL_FOR_LOOP
|
||||
template<class robj> strong_inline Lattice<vobj> & operator = (const Lattice<robj> & r){
|
||||
this->checkerboard = r.checkerboard;
|
||||
conformable(*this,r);
|
||||
std::cout<<"Lattice operator ="<<std::endl;
|
||||
std::cout<<GridLogMessage<<"Lattice operator ="<<std::endl;
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int ss=0;ss<_grid->oSites();ss++){
|
||||
this->_odata[ss]=r._odata[ss];
|
||||
|
@ -125,7 +125,7 @@ template<class vobj> inline void sliceSum(const Lattice<vobj> &Data,std::vector<
|
||||
assert(grid!=NULL);
|
||||
|
||||
// FIXME
|
||||
std::cout<<"WARNING ! SliceSum is unthreaded "<<grid->SumArraySize()<<" threads "<<std::endl;
|
||||
std::cout<<GridLogMessage<<"WARNING ! SliceSum is unthreaded "<<grid->SumArraySize()<<" threads "<<std::endl;
|
||||
|
||||
const int Nd = grid->_ndimension;
|
||||
const int Nsimd = grid->Nsimd();
|
||||
|
@ -233,7 +233,8 @@ namespace Grid {
|
||||
int words=sizeof(scalar_object)/sizeof(scalar_type);
|
||||
|
||||
std::vector<scalar_object> buf(Nsimd);
|
||||
|
||||
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int ss=0;ss<osites;ss++){
|
||||
for(int si=0;si<Nsimd;si++){
|
||||
|
||||
|
@ -23,7 +23,7 @@ inline void subdivides(GridBase *coarse,GridBase *fine)
|
||||
template<class vobj> inline void pickCheckerboard(int cb,Lattice<vobj> &half,const Lattice<vobj> &full){
|
||||
half.checkerboard = cb;
|
||||
int ssh=0;
|
||||
PARALLEL_FOR_LOOP
|
||||
//PARALLEL_FOR_LOOP
|
||||
for(int ss=0;ss<full._grid->oSites();ss++){
|
||||
std::vector<int> coor;
|
||||
int cbos;
|
||||
@ -40,7 +40,7 @@ PARALLEL_FOR_LOOP
|
||||
template<class vobj> inline void setCheckerboard(Lattice<vobj> &full,const Lattice<vobj> &half){
|
||||
int cb = half.checkerboard;
|
||||
int ssh=0;
|
||||
PARALLEL_FOR_LOOP
|
||||
//PARALLEL_FOR_LOOP
|
||||
for(int ss=0;ss<full._grid->oSites();ss++){
|
||||
std::vector<int> coor;
|
||||
int cbos;
|
||||
@ -158,6 +158,7 @@ template<class vobj,class CComplex>
|
||||
|
||||
fine_inner = localInnerProduct(fineX,fineY);
|
||||
blockSum(coarse_inner,fine_inner);
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int ss=0;ss<coarse->oSites();ss++){
|
||||
CoarseInner._odata[ss] = coarse_inner._odata[ss];
|
||||
}
|
||||
|
44  lib/pugixml/README.md (new file)
@ -0,0 +1,44 @@
|
||||
pugixml [](https://travis-ci.org/zeux/pugixml) [](https://ci.appveyor.com/project/zeux/pugixml)
|
||||
=======
|
||||
|
||||
pugixml is a C++ XML processing library, which consists of a DOM-like interface with rich traversal/modification
|
||||
capabilities, an extremely fast XML parser which constructs the DOM tree from an XML file/buffer, and an XPath 1.0
|
||||
implementation for complex data-driven tree queries. Full Unicode support is also available, with Unicode interface
|
||||
variants and conversions between different Unicode encodings (which happen automatically during parsing/saving).
|
||||
|
||||
pugixml is used by a lot of projects, both open-source and proprietary, for performance and easy-to-use interface.
|
||||
|
||||
## Documentation
|
||||
|
||||
Documentation for the current release of pugixml is available on-line as two separate documents:
|
||||
|
||||
* [Quick-start guide](http://pugixml.org/docs/quickstart.html), that aims to provide enough information to start using the library;
|
||||
* [Complete reference manual](http://pugixml.org/docs/manual.html), that describes all features of the library in detail.
|
||||
|
||||
You’re advised to start with the quick-start guide; however, many important library features are either not described in it at all or only mentioned briefly; if you require more information you should read the complete manual.
|
||||
|
||||
## License
|
||||
This library is available to anybody free of charge, under the terms of MIT License:
|
||||
|
||||
Copyright (c) 2006-2015 Arseny Kapoulkine
|
||||
|
||||
Permission is hereby granted, free of charge, to any person
|
||||
obtaining a copy of this software and associated documentation
|
||||
files (the "Software"), to deal in the Software without
|
||||
restriction, including without limitation the rights to use,
|
||||
copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the
|
||||
Software is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
OTHER DEALINGS IN THE SOFTWARE.
|
71	lib/pugixml/pugiconfig.hpp	Normal file
@ -0,0 +1,71 @@
|
||||
/**
|
||||
* pugixml parser - version 1.6
|
||||
* --------------------------------------------------------
|
||||
* Copyright (C) 2006-2015, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com)
|
||||
* Report bugs and download new versions at http://pugixml.org/
|
||||
*
|
||||
* This library is distributed under the MIT License. See notice at the end
|
||||
* of this file.
|
||||
*
|
||||
* This work is based on the pugxml parser, which is:
|
||||
* Copyright (C) 2003, by Kristen Wegner (kristen@tima.net)
|
||||
*/
|
||||
|
||||
#ifndef HEADER_PUGICONFIG_HPP
|
||||
#define HEADER_PUGICONFIG_HPP
|
||||
|
||||
// Uncomment this to enable wchar_t mode
|
||||
// #define PUGIXML_WCHAR_MODE
|
||||
|
||||
// Uncomment this to disable XPath
|
||||
// #define PUGIXML_NO_XPATH
|
||||
|
||||
// Uncomment this to disable STL
|
||||
// #define PUGIXML_NO_STL
|
||||
|
||||
// Uncomment this to disable exceptions
|
||||
// #define PUGIXML_NO_EXCEPTIONS
|
||||
|
||||
// Set this to control attributes for public classes/functions, i.e.:
|
||||
// #define PUGIXML_API __declspec(dllexport) // to export all public symbols from DLL
|
||||
// #define PUGIXML_CLASS __declspec(dllimport) // to import all classes from DLL
|
||||
// #define PUGIXML_FUNCTION __fastcall // to set calling conventions to all public functions to fastcall
|
||||
// In absence of PUGIXML_CLASS/PUGIXML_FUNCTION definitions PUGIXML_API is used instead
|
||||
|
||||
// Tune these constants to adjust memory-related behavior
|
||||
// #define PUGIXML_MEMORY_PAGE_SIZE 32768
|
||||
// #define PUGIXML_MEMORY_OUTPUT_STACK 10240
|
||||
// #define PUGIXML_MEMORY_XPATH_PAGE_SIZE 4096
|
||||
|
||||
// Uncomment this to switch to header-only version
|
||||
// #define PUGIXML_HEADER_ONLY
|
||||
|
||||
// Uncomment this to enable long long support
|
||||
// #define PUGIXML_HAS_LONG_LONG
|
||||
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Copyright (c) 2006-2015 Arseny Kapoulkine
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person
|
||||
* obtaining a copy of this software and associated documentation
|
||||
* files (the "Software"), to deal in the Software without
|
||||
* restriction, including without limitation the rights to use,
|
||||
* copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following
|
||||
* conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be
|
||||
* included in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
12485	lib/pugixml/pugixml.cpp	Normal file (diff suppressed: file too large)
1400	lib/pugixml/pugixml.hpp	Normal file (diff suppressed: file too large)
52	lib/pugixml/readme.txt	Normal file
@ -0,0 +1,52 @@
|
||||
pugixml 1.6 - an XML processing library
|
||||
|
||||
Copyright (C) 2006-2015, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com)
|
||||
Report bugs and download new versions at http://pugixml.org/
|
||||
|
||||
This is the distribution of pugixml, which is a C++ XML processing library,
|
||||
which consists of a DOM-like interface with rich traversal/modification
|
||||
capabilities, an extremely fast XML parser which constructs the DOM tree from
|
||||
an XML file/buffer, and an XPath 1.0 implementation for complex data-driven
|
||||
tree queries. Full Unicode support is also available, with Unicode interface
|
||||
variants and conversions between different Unicode encodings (which happen
|
||||
automatically during parsing/saving).
|
||||
|
||||
The distribution contains the following folders:
|
||||
|
||||
contrib/ - various contributions to pugixml
|
||||
|
||||
docs/ - documentation
|
||||
docs/samples - pugixml usage examples
|
||||
docs/quickstart.html - quick start guide
|
||||
docs/manual.html - complete manual
|
||||
|
||||
scripts/ - project files for IDE/build systems
|
||||
|
||||
src/ - header and source files
|
||||
|
||||
readme.txt - this file.
|
||||
|
||||
This library is distributed under the MIT License:
|
||||
|
||||
Copyright (c) 2006-2015 Arseny Kapoulkine
|
||||
|
||||
Permission is hereby granted, free of charge, to any person
|
||||
obtaining a copy of this software and associated documentation
|
||||
files (the "Software"), to deal in the Software without
|
||||
restriction, including without limitation the rights to use,
|
||||
copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the
|
||||
Software is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
OTHER DEALINGS IN THE SOFTWARE.
|
@ -7,14 +7,30 @@ template<class GaugeField>
class Action {

 public:
  virtual void init(const GaugeField &U, GridParallelRNG& pRNG) = 0;
  virtual RealD S(const GaugeField &U) = 0;                              // evaluate the action
  virtual void deriv(const GaugeField &U,GaugeField & dSdU ) = 0;        // evaluate the action derivative
  //virtual void refresh(const GaugeField & ) {} ;
  virtual void init (const GaugeField &U, GridParallelRNG& pRNG) = 0;    //
  virtual RealD S   (const GaugeField &U) = 0;                           // evaluate the action
  virtual void deriv(const GaugeField &U,GaugeField & dSdU ) = 0;        // evaluate the action derivative
  virtual void refresh(const GaugeField & ) {};                          // Default to no-op for actions with no internal fields
  // Boundary conditions?
  // Heatbath?
  virtual ~Action() {};
};

// Could derive PseudoFermion action with a PF field, FermionField, and a Grid; implement refresh
template<class GaugeField, class FermionField>
class PseudoFermionAction : public Action<GaugeField> {
 public:
  FermionField Phi;
  GridParallelRNG &pRNG;
  GridBase &Grid;

  PseudoFermionAction(GridBase &_Grid,GridParallelRNG &_pRNG) : Grid(_Grid), Phi(&_Grid), pRNG(_pRNG) {
  };

  virtual void refresh(const GaugeField &gauge) {
    gaussian(Phi,pRNG);
  };

};
}}
#endif
@ -79,4 +79,10 @@
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
#include <qcd/action/fermion/g5HermitianLinop.h>
|
||||
|
||||
////////////////////////////////////////
|
||||
// Pseudo fermion combinations
|
||||
////////////////////////////////////////
|
||||
#include <qcd/action/pseudofermion/TwoFlavour.h>
|
||||
|
||||
|
||||
#endif
|
||||
|
@ -10,12 +10,12 @@ namespace Grid {
|
||||
void ContinuedFractionFermion5D::SetCoefficientsZolotarev(RealD zolo_hi,Approx::zolotarev_data *zdata)
|
||||
{
|
||||
// How to check Ls matches??
|
||||
// std::cout << Ls << " Ls"<<std::endl;
|
||||
// std::cout << zdata->n << " - n"<<std::endl;
|
||||
// std::cout << zdata->da << " -da "<<std::endl;
|
||||
// std::cout << zdata->db << " -db"<<std::endl;
|
||||
// std::cout << zdata->dn << " -dn"<<std::endl;
|
||||
// std::cout << zdata->dd << " -dd"<<std::endl;
|
||||
// std::cout<<GridLogMessage << Ls << " Ls"<<std::endl;
|
||||
// std::cout<<GridLogMessage << zdata->n << " - n"<<std::endl;
|
||||
// std::cout<<GridLogMessage << zdata->da << " -da "<<std::endl;
|
||||
// std::cout<<GridLogMessage << zdata->db << " -db"<<std::endl;
|
||||
// std::cout<<GridLogMessage << zdata->dn << " -dn"<<std::endl;
|
||||
// std::cout<<GridLogMessage << zdata->dd << " -dd"<<std::endl;
|
||||
|
||||
assert(zdata->db==Ls);// Beta has Ls coeffs
|
||||
|
||||
@ -55,7 +55,7 @@ namespace Grid {
|
||||
See[s] = Aee[s] - 1.0/See[s-1];
|
||||
}
|
||||
for(int s=0;s<Ls;s++){
|
||||
std::cout <<"s = "<<s<<" Beta "<<Beta[s]<<" Aee "<<Aee[s] <<" See "<<See[s] <<std::endl;
|
||||
std::cout<<GridLogMessage <<"s = "<<s<<" Beta "<<Beta[s]<<" Aee "<<Aee[s] <<" See "<<See[s] <<std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -32,7 +32,7 @@ namespace Grid {
|
||||
Approx::zolotarev_data *zdata = Approx::higham(eps,this->Ls);// eps is ignored for higham
|
||||
assert(zdata->n==this->Ls);
|
||||
|
||||
std::cout << "DomainWallFermion with Ls="<<Ls<<std::endl;
|
||||
std::cout<<GridLogMessage << "DomainWallFermion with Ls="<<Ls<<std::endl;
|
||||
// Call base setter
|
||||
this->CayleyFermion5D::SetCoefficientsTanh(zdata,1.0,0.0);
|
||||
|
||||
|
@ -39,10 +39,27 @@ namespace Grid {
|
||||
virtual void Dhop (const FermionField &in, FermionField &out,int dag)=0;
|
||||
virtual void DhopOE(const FermionField &in, FermionField &out,int dag)=0;
|
||||
virtual void DhopEO(const FermionField &in, FermionField &out,int dag)=0;
|
||||
virtual void DhopDir(const FermionField &in, FermionField &out,int dir,int disp)=0; // implemented by WilsonFermion and WilsonFermion5D
|
||||
|
||||
virtual void Mdiag(const FermionField &in, FermionField &out) { Mooee(in,out);}; // Same as Mooee applied to both CB's
|
||||
virtual void Mdir (const FermionField &in, FermionField &out,int dir,int disp)=0; // case by case Wilson, Clover, Cayley, ContFrac, PartFrac
|
||||
virtual void DhopDir(const FermionField &in, FermionField &out,int dir,int disp)=0; // implemented by WilsonFermion and WilsonFermion5D
|
||||
// force terms; five routines; default to Dhop on diagonal
|
||||
virtual void MDeriv (LatticeGaugeField &mat,const FermionField &U,const FermionField &V,int dag){DhopDeriv(mat,U,V,dag);};
|
||||
virtual void MoeDeriv(LatticeGaugeField &mat,const FermionField &U,const FermionField &V,int dag){DhopDerivOE(mat,U,V,dag);};
|
||||
virtual void MeoDeriv(LatticeGaugeField &mat,const FermionField &U,const FermionField &V,int dag){DhopDerivEO(mat,U,V,dag);};
|
||||
virtual void MooDeriv(LatticeGaugeField &mat,const FermionField &U,const FermionField &V,int dag){mat=zero;};
|
||||
virtual void MeeDeriv(LatticeGaugeField &mat,const FermionField &U,const FermionField &V,int dag){mat=zero;};
|
||||
|
||||
virtual void DhopDeriv (LatticeGaugeField &mat,const FermionField &U,const FermionField &V,int dag)=0;
|
||||
virtual void DhopDerivEO(LatticeGaugeField &mat,const FermionField &U,const FermionField &V,int dag)=0;
|
||||
virtual void DhopDerivOE(LatticeGaugeField &mat,const FermionField &U,const FermionField &V,int dag)=0;
|
||||
|
||||
|
||||
virtual void Mdiag (const FermionField &in, FermionField &out) { Mooee(in,out);}; // Same as Mooee applied to both CB's
|
||||
virtual void Mdir (const FermionField &in, FermionField &out,int dir,int disp)=0; // case by case Wilson, Clover, Cayley, ContFrac, PartFrac
|
||||
|
||||
///////////////////////////////////////////////
|
||||
// Updates gauge field during HMC
|
||||
///////////////////////////////////////////////
|
||||
virtual void ImportGauge(const GaugeField & _U);
|
||||
|
||||
};
|
||||
|
||||
|
@ -30,7 +30,7 @@ namespace Grid {
|
||||
{
|
||||
RealD eps = 1.0;
|
||||
|
||||
std::cout << "MobiusFermion (b="<<b<<",c="<<c<<") with Ls= "<<Ls<<" Tanh approx"<<std::endl;
|
||||
std::cout<<GridLogMessage << "MobiusFermion (b="<<b<<",c="<<c<<") with Ls= "<<Ls<<" Tanh approx"<<std::endl;
|
||||
Approx::zolotarev_data *zdata = Approx::higham(eps,this->Ls);// eps is ignored for higham
|
||||
assert(zdata->n==this->Ls);
|
||||
|
||||
|
@ -34,7 +34,7 @@ namespace Grid {
|
||||
Approx::zolotarev_data *zdata = Approx::zolotarev(eps,this->Ls,0);
|
||||
assert(zdata->n==this->Ls);
|
||||
|
||||
std::cout << "MobiusZolotarevFermion (b="<<b<<",c="<<c<<") with Ls= "<<Ls<<" Zolotarev range ["<<lo<<","<<hi<<"]"<<std::endl;
|
||||
std::cout<<GridLogMessage << "MobiusZolotarevFermion (b="<<b<<",c="<<c<<") with Ls= "<<Ls<<" Zolotarev range ["<<lo<<","<<hi<<"]"<<std::endl;
|
||||
|
||||
// Call base setter
|
||||
this->CayleyFermion5D::SetCoefficientsZolotarev(hi,zdata,b,c);
|
||||
|
@ -260,12 +260,12 @@ namespace Grid {
|
||||
void PartialFractionFermion5D::SetCoefficientsZolotarev(RealD zolo_hi,Approx::zolotarev_data *zdata){
|
||||
|
||||
// check on degree matching
|
||||
// std::cout << Ls << " Ls"<<std::endl;
|
||||
// std::cout << zdata->n << " - n"<<std::endl;
|
||||
// std::cout << zdata->da << " -da "<<std::endl;
|
||||
// std::cout << zdata->db << " -db"<<std::endl;
|
||||
// std::cout << zdata->dn << " -dn"<<std::endl;
|
||||
// std::cout << zdata->dd << " -dd"<<std::endl;
|
||||
// std::cout<<GridLogMessage << Ls << " Ls"<<std::endl;
|
||||
// std::cout<<GridLogMessage << zdata->n << " - n"<<std::endl;
|
||||
// std::cout<<GridLogMessage << zdata->da << " -da "<<std::endl;
|
||||
// std::cout<<GridLogMessage << zdata->db << " -db"<<std::endl;
|
||||
// std::cout<<GridLogMessage << zdata->dn << " -dn"<<std::endl;
|
||||
// std::cout<<GridLogMessage << zdata->dd << " -dd"<<std::endl;
|
||||
assert(Ls == (2*zdata->da -1) );
|
||||
|
||||
// Part frac
|
||||
|
@ -24,6 +24,10 @@ WilsonFermion::WilsonFermion(LatticeGaugeField &_Umu,
|
||||
{
|
||||
// Allocate the required comms buffer
|
||||
comm_buf.resize(Stencil._unified_buffer_size); // this is always big enough to contain EO
|
||||
ImportGauge(_Umu);
|
||||
}
|
||||
void WilsonFermion::ImportGauge(const LatticeGaugeField &_Umu)
|
||||
{
|
||||
DoubleStore(Umu,_Umu);
|
||||
pickCheckerboard(Even,UmuEven,Umu);
|
||||
pickCheckerboard(Odd ,UmuOdd,Umu);
|
||||
@ -98,7 +102,9 @@ void WilsonFermion::Mdir (const LatticeFermion &in, LatticeFermion &out,int dir,
|
||||
DhopDir(in,out,dir,disp);
|
||||
}
|
||||
void WilsonFermion::DhopDir(const LatticeFermion &in, LatticeFermion &out,int dir,int disp){
|
||||
|
||||
WilsonCompressor compressor(DaggerNo);
|
||||
|
||||
Stencil.HaloExchange<vSpinColourVector,vHalfSpinColourVector,WilsonCompressor>(in,comm_buf,compressor);
|
||||
|
||||
assert( (disp==1)||(disp==-1) );
|
||||
@ -109,9 +115,22 @@ void WilsonFermion::DhopDir(const LatticeFermion &in, LatticeFermion &out,int di
|
||||
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int sss=0;sss<in._grid->oSites();sss++){
|
||||
DiracOptDhopDir(Stencil,Umu,comm_buf,sss,sss,in,out,dirdisp);
|
||||
DiracOptDhopDir(Stencil,Umu,comm_buf,sss,sss,in,out,dirdisp,dirdisp);
|
||||
}
|
||||
|
||||
};
|
||||
void WilsonFermion::DhopDirDisp(const LatticeFermion &in, LatticeFermion &out,int dirdisp,int gamma,int dag)
|
||||
{
|
||||
|
||||
WilsonCompressor compressor(dag);
|
||||
|
||||
Stencil.HaloExchange<vSpinColourVector,vHalfSpinColourVector,WilsonCompressor>(in,comm_buf,compressor);
|
||||
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int sss=0;sss<in._grid->oSites();sss++){
|
||||
DiracOptDhopDir(Stencil,Umu,comm_buf,sss,sss,in,out,dirdisp,gamma);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
void WilsonFermion::DhopInternal(CartesianStencil & st,LatticeDoubledGaugeField & U,
|
||||
@ -177,6 +196,77 @@ void WilsonFermion::Dhop(const LatticeFermion &in, LatticeFermion &out,int dag)
|
||||
DhopInternal(Stencil,Umu,in,out,dag);
|
||||
}
|
||||
|
||||
void WilsonFermion::DerivInternal(CartesianStencil & st,LatticeDoubledGaugeField & U,
|
||||
LatticeGaugeField &mat,const LatticeFermion &A,const LatticeFermion &B,int dag)
|
||||
{
|
||||
assert((dag==DaggerNo) ||(dag==DaggerYes));
|
||||
|
||||
WilsonCompressor compressor(dag);
|
||||
|
||||
LatticeColourMatrix tmp(B._grid);
|
||||
LatticeFermion Btilde(B._grid);
|
||||
|
||||
st.HaloExchange<vSpinColourVector,vHalfSpinColourVector,WilsonCompressor>(B,comm_buf,compressor);
|
||||
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// Flip gamma (1+g)<->(1-g) if dag
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
int gamma = mu;
|
||||
if ( dag ) gamma+= Nd;
|
||||
|
||||
////////////////////////
|
||||
// Call the single hop
|
||||
////////////////////////
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int sss=0;sss<B._grid->oSites();sss++){
|
||||
DiracOptDhopDir(st,U,comm_buf,sss,sss,B,Btilde,mu,gamma);
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////
|
||||
// spin trace outer product
|
||||
//////////////////////////////////////////////////
|
||||
tmp = TraceIndex<SpinIndex>(outerProduct(Btilde,A));
|
||||
PokeIndex<LorentzIndex>(mat,tmp,mu);
|
||||
|
||||
}
|
||||
}
|
||||
void WilsonFermion::DhopDeriv(LatticeGaugeField &mat,const LatticeFermion &U,const LatticeFermion &V,int dag)
|
||||
{
|
||||
conformable(U._grid,_grid);
|
||||
conformable(U._grid,V._grid);
|
||||
conformable(U._grid,mat._grid);
|
||||
|
||||
mat.checkerboard = U.checkerboard;
|
||||
|
||||
DerivInternal(Stencil,Umu,mat,U,V,dag);
|
||||
}
|
||||
void WilsonFermion::DhopDerivOE(LatticeGaugeField &mat,const LatticeFermion &U,const LatticeFermion &V,int dag)
|
||||
{
|
||||
conformable(U._grid,_cbgrid);
|
||||
conformable(U._grid,V._grid);
|
||||
conformable(U._grid,mat._grid);
|
||||
|
||||
assert(V.checkerboard==Even);
|
||||
assert(U.checkerboard==Odd);
|
||||
mat.checkerboard = Odd;
|
||||
|
||||
DerivInternal(StencilEven,UmuOdd,mat,U,V,dag);
|
||||
}
|
||||
void WilsonFermion::DhopDerivEO(LatticeGaugeField &mat,const LatticeFermion &U,const LatticeFermion &V,int dag)
|
||||
{
|
||||
conformable(U._grid,_cbgrid);
|
||||
conformable(U._grid,V._grid);
|
||||
conformable(U._grid,mat._grid);
|
||||
|
||||
assert(V.checkerboard==Odd);
|
||||
assert(U.checkerboard==Even);
|
||||
mat.checkerboard = Even;
|
||||
|
||||
DerivInternal(StencilOdd,UmuEven,mat,U,V,dag);
|
||||
}
|
||||
|
||||
}}
|
||||
|
||||
|
||||
|
@ -24,11 +24,95 @@ namespace Grid {
|
||||
// half checkerboard operaions
|
||||
void Meooe (const LatticeFermion &in, LatticeFermion &out);
|
||||
void MeooeDag (const LatticeFermion &in, LatticeFermion &out);
|
||||
|
||||
virtual void Mooee (const LatticeFermion &in, LatticeFermion &out); // remain virtual so we
|
||||
virtual void MooeeDag (const LatticeFermion &in, LatticeFermion &out); // can derive Clover
|
||||
virtual void MooeeInv (const LatticeFermion &in, LatticeFermion &out); // from Wilson base
|
||||
virtual void MooeeInvDag (const LatticeFermion &in, LatticeFermion &out);
|
||||
|
||||
////////////////////////
//
// Force term: d/dtau S = 0
//
// It is simplest to consider the two flavour force term
//
//    S[U,phi] = phi^dag (M^dag M)^-1 phi
//
// but simplify even this to
//
//    S[U,phi] = phi^dag M^dag M phi
//
// (other options exist depending on the nature of the action fragment).
//
// Require the momentum to be traceless anti-hermitian to move within the group manifold [ P = i P^a T^a ].
//
// Define the HMC hamiltonian
//
//    H = 1/2 Tr P^2 + S(U,phi)
//
//    Udot = P U          (Lorentz & colour indices multiplied)
//
// Hence
//
//    Udot^dag = U^dag P^dag = - U^dag P        (^dag == dagger)
//
// So, taking some liberty with implicit indices,
//
//    dH/dt = 0 = Tr[ Pdot P ] + Tr[ Udot dS/dU + Udot^dag (dS/dU)^dag ]
//
//              = Tr[ Pdot P ] + i Tr[ P U dS/dU - U^dag P (dS/dU)^dag ]
//
//              = Tr[ P ( Pdot + i ( U dS/dU - (dS/dU)^dag U^dag ) ) ]
//
//    =>  Pdot = -i ( U dS/dU - (dS/dU)^dag U^dag )     generates the HMC EoM
//
// Simple case: work this out using S = phi^dag M^dag M phi for Wilson:
//
//    dS/dt = dU_x/dt dS/dU_x + dU_x^dag/dt dS/dU_x^dag
//
//          =   Tr i P U_x   [ (\phi^\dag)_x (1+g) (M \phi)_{x+\mu} + (\phi^\dag M^\dag)_x (1-g) \phi_{x+\mu} ]
//            -    i U_x^dag P [ (\phi^\dag)_{x+\mu} (1-g) (M \phi)_x + (\phi^\dag M^\dag)_{x+\mu} (1+g) \phi_x ]
//
//          =   i [ (\phi^\dag)_x                         ]_j P_jk [ U_x (1+g) (M \phi)_{x+\mu} ]_k   (1)
//            + i [ (\phi^\dag M^\dag)_x                  ]_j P_jk [ U_x (1-g) \phi_{x+\mu}     ]_k   (2)
//            - i [ (\phi^\dag)_{x+\mu} (1-g) U_x^dag     ]_j P_jk [ (M \phi)_x                 ]_k   (3)
//            - i [ (\phi^\dag M^\dag)_{x+\mu} (1+g) U_x^dag ]_j P_jk [ \phi_x                  ]_k   (4)
//
// Observe that (1)* = (4) and (2)* = (3).
//
// Write as
//
//    Pdot_{kj} = - i ( [U_x (1+g) (M \phi)_{x+\mu}] (x) [(\phi^\dag)_x] + [U_x (1-g) \phi_{x+\mu}] (x) [(\phi^\dag M^\dag)_x] - h.c. )
//
// where (x) denotes the outer product in colour and spins are traced.
//
// Need only evaluate (1) and (2) [Chroma] or (2) and (4) [IroIro] and take the
// traceless anti-hermitian part (of the term in brackets without the "i").
//
// Generalisation to S = phi^dag (M^dag M)^{-1} phi is simple;
// for more complicated DWF etc. apply the product rule in differentiation.
//
////////////////////////
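For reference, the derivation sketched in the comment block above condenses to the following (same conventions as the comment, with $\dot U = P U$ and $P$ traceless anti-hermitian; nothing beyond the comment is assumed):

\begin{align}
  H &= \tfrac{1}{2}\,\mathrm{Tr}\,P^2 + S(U,\phi), \\
  0 = \frac{dH}{d\tau}
    &= \mathrm{Tr}\,\dot P P
     + i\,\mathrm{Tr}\!\left[\, P\,U\,\frac{\partial S}{\partial U}
     - U^\dagger P \left(\frac{\partial S}{\partial U}\right)^{\!\dagger} \right]
     = \mathrm{Tr}\,P\left( \dot P + i\left( U\,\frac{\partial S}{\partial U}
     - \left(\frac{\partial S}{\partial U}\right)^{\!\dagger} U^\dagger \right)\right), \\
  \Rightarrow\ \dot P &= -\,i\left( U\,\frac{\partial S}{\partial U}
     - \left(\frac{\partial S}{\partial U}\right)^{\!\dagger} U^\dagger \right)_{\mathrm{TA}},
\end{align}

with TA the traceless anti-hermitian projection referred to at the end of the comment; DerivInternal assembles exactly this combination for the momentum update.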
|
||||
void DhopDeriv (LatticeGaugeField &mat,const LatticeFermion &U,const LatticeFermion &V,int dag);
|
||||
void DhopDerivEO(LatticeGaugeField &mat,const LatticeFermion &U,const LatticeFermion &V,int dag);
|
||||
void DhopDerivOE(LatticeGaugeField &mat,const LatticeFermion &U,const LatticeFermion &V,int dag);
|
||||
|
||||
// Extra support internal
|
||||
void DerivInternal(CartesianStencil & st,
|
||||
LatticeDoubledGaugeField & U,
|
||||
LatticeGaugeField &mat,
|
||||
const LatticeFermion &A,
|
||||
const LatticeFermion &B,
|
||||
int dag);
|
||||
|
||||
|
||||
// non-hermitian hopping term; half cb or both
|
||||
void Dhop (const LatticeFermion &in, LatticeFermion &out,int dag);
|
||||
void DhopOE(const LatticeFermion &in, LatticeFermion &out,int dag);
|
||||
@ -37,6 +121,7 @@ namespace Grid {
|
||||
// Multigrid assistance
|
||||
void Mdir (const LatticeFermion &in, LatticeFermion &out,int dir,int disp);
|
||||
void DhopDir(const LatticeFermion &in, LatticeFermion &out,int dir,int disp);
|
||||
void DhopDirDisp(const LatticeFermion &in, LatticeFermion &out,int dirdisp,int gamma,int dag);
|
||||
|
||||
///////////////////////////////////////////////////////////////
|
||||
// Extra methods added by derived
|
||||
@ -51,6 +136,7 @@ namespace Grid {
|
||||
WilsonFermion(LatticeGaugeField &_Umu,GridCartesian &Fgrid,GridRedBlackCartesian &Hgrid,RealD _mass);
|
||||
|
||||
// DoubleStore
|
||||
virtual void ImportGauge(const LatticeGaugeField &_Umu);
|
||||
void DoubleStore(LatticeDoubledGaugeField &Uds,const LatticeGaugeField &Umu);
|
||||
|
||||
///////////////////////////////////////////////////////////////
|
||||
@ -59,7 +145,8 @@ namespace Grid {
|
||||
static int HandOptDslash; // these are a temporary hack
|
||||
static int MortonOrder;
|
||||
|
||||
protected:
|
||||
// protected:
|
||||
public:
|
||||
|
||||
RealD mass;
|
||||
|
||||
|
@ -65,7 +65,10 @@ namespace QCD {
|
||||
|
||||
// Allocate the required comms buffer
|
||||
comm_buf.resize(Stencil._unified_buffer_size); // this is always big enough to contain EO
|
||||
|
||||
ImportGauge(_Umu);
|
||||
}
|
||||
void WilsonFermion5D::ImportGauge(const LatticeGaugeField &_Umu)
|
||||
{
|
||||
DoubleStore(Umu,_Umu);
|
||||
pickCheckerboard(Even,UmuEven,Umu);
|
||||
pickCheckerboard(Odd ,UmuOdd,Umu);
|
||||
@ -100,19 +103,111 @@ void WilsonFermion5D::DhopDir(const LatticeFermion &in, LatticeFermion &out,int
|
||||
assert(dirdisp<=7);
|
||||
assert(dirdisp>=0);
|
||||
|
||||
//PARALLEL_FOR_LOOP
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int ss=0;ss<Umu._grid->oSites();ss++){
|
||||
for(int s=0;s<Ls;s++){
|
||||
int sU=ss;
|
||||
int sF = s+Ls*sU;
|
||||
DiracOptDhopDir(Stencil,Umu,comm_buf,sF,sU,in,out,dirdisp);
|
||||
DiracOptDhopDir(Stencil,Umu,comm_buf,sF,sU,in,out,dirdisp,dirdisp);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
void WilsonFermion5D::DerivInternal(CartesianStencil & st,
|
||||
LatticeDoubledGaugeField & U,
|
||||
LatticeGaugeField &mat,
|
||||
const LatticeFermion &A,
|
||||
const LatticeFermion &B,
|
||||
int dag)
|
||||
{
|
||||
assert((dag==DaggerNo) ||(dag==DaggerYes));
|
||||
|
||||
WilsonCompressor compressor(dag);
|
||||
|
||||
LatticeColourMatrix tmp(B._grid);
|
||||
LatticeFermion Btilde(B._grid);
|
||||
|
||||
st.HaloExchange<vSpinColourVector,vHalfSpinColourVector,WilsonCompressor>(B,comm_buf,compressor);
|
||||
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// Flip gamma if dag
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
int gamma = mu;
|
||||
if ( dag ) gamma+= Nd;
|
||||
|
||||
////////////////////////
|
||||
// Call the single hop
|
||||
////////////////////////
|
||||
PARALLEL_FOR_LOOP
|
||||
for(int sss=0;sss<B._grid->oSites();sss++){
|
||||
for(int s=0;s<Ls;s++){
|
||||
int sU=sss;
|
||||
int sF = s+Ls*sU;
|
||||
DiracOptDhopDir(st,U,comm_buf,sF,sU,B,Btilde,mu,gamma);
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////
|
||||
// spin trace outer product
|
||||
////////////////////////////
|
||||
// FIXME : need to sum over fifth direction.
|
||||
tmp = TraceIndex<SpinIndex>(outerProduct(Btilde,A)); // ordering here
|
||||
PokeIndex<LorentzIndex>(mat,tmp,mu);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
void WilsonFermion5D::DhopDeriv( LatticeGaugeField &mat,
|
||||
const LatticeFermion &A,
|
||||
const LatticeFermion &B,
|
||||
int dag)
|
||||
{
|
||||
conformable(A._grid,FermionGrid());
|
||||
conformable(A._grid,B._grid);
|
||||
conformable(GaugeGrid(),mat._grid);
|
||||
|
||||
mat.checkerboard = A.checkerboard;
|
||||
|
||||
DerivInternal(Stencil,Umu,mat,A,B,dag);
|
||||
}
|
||||
|
||||
void WilsonFermion5D::DhopDerivEO(LatticeGaugeField &mat,
|
||||
const LatticeFermion &A,
|
||||
const LatticeFermion &B,
|
||||
int dag)
|
||||
{
|
||||
conformable(A._grid,FermionRedBlackGrid());
|
||||
conformable(GaugeRedBlackGrid(),mat._grid);
|
||||
conformable(A._grid,B._grid);
|
||||
|
||||
assert(B.checkerboard==Odd);
|
||||
assert(A.checkerboard==Even);
|
||||
mat.checkerboard = Even;
|
||||
|
||||
DerivInternal(StencilOdd,UmuEven,mat,A,B,dag);
|
||||
}
|
||||
|
||||
void WilsonFermion5D::DhopDerivOE(LatticeGaugeField &mat,
|
||||
const LatticeFermion &A,
|
||||
const LatticeFermion &B,
|
||||
int dag)
|
||||
{
|
||||
conformable(A._grid,FermionRedBlackGrid());
|
||||
conformable(GaugeRedBlackGrid(),mat._grid);
|
||||
conformable(A._grid,B._grid);
|
||||
|
||||
assert(B.checkerboard==Even);
|
||||
assert(A.checkerboard==Odd);
|
||||
mat.checkerboard = Odd;
|
||||
|
||||
DerivInternal(StencilEven,UmuOdd,mat,A,B,dag);
|
||||
}
|
||||
|
||||
void WilsonFermion5D::DhopInternal(CartesianStencil & st, LebesgueOrder &lo,
|
||||
LatticeDoubledGaugeField & U,
|
||||
const LatticeFermion &in, LatticeFermion &out,int dag)
|
||||
const LatticeFermion &in, LatticeFermion &out,int dag)
|
||||
{
|
||||
// assert((dag==DaggerNo) ||(dag==DaggerYes));
|
||||
|
||||
|
@ -44,12 +44,18 @@ namespace Grid {
|
||||
|
||||
// half checkerboard operations; leave unimplemented as abstract for now
|
||||
virtual void Meooe (const LatticeFermion &in, LatticeFermion &out){assert(0);};
|
||||
virtual void MeooeDag (const LatticeFermion &in, LatticeFermion &out){assert(0);};
|
||||
virtual void Mooee (const LatticeFermion &in, LatticeFermion &out){assert(0);};
|
||||
virtual void MooeeDag (const LatticeFermion &in, LatticeFermion &out){assert(0);};
|
||||
virtual void MooeeInv (const LatticeFermion &in, LatticeFermion &out){assert(0);};
|
||||
|
||||
virtual void MeooeDag (const LatticeFermion &in, LatticeFermion &out){assert(0);};
|
||||
virtual void MooeeDag (const LatticeFermion &in, LatticeFermion &out){assert(0);};
|
||||
virtual void MooeeInvDag (const LatticeFermion &in, LatticeFermion &out){assert(0);};
|
||||
|
||||
// These can be overridden by fancy 5d chiral actions
|
||||
virtual void DhopDeriv (LatticeGaugeField &mat,const LatticeFermion &U,const LatticeFermion &V,int dag);
|
||||
virtual void DhopDerivEO(LatticeGaugeField &mat,const LatticeFermion &U,const LatticeFermion &V,int dag);
|
||||
virtual void DhopDerivOE(LatticeGaugeField &mat,const LatticeFermion &U,const LatticeFermion &V,int dag);
|
||||
|
||||
// Implement hopping term non-hermitian hopping term; half cb or both
|
||||
// Implement s-diagonal DW
|
||||
void DW (const LatticeFermion &in, LatticeFermion &out,int dag);
|
||||
@ -64,6 +70,14 @@ namespace Grid {
|
||||
///////////////////////////////////////////////////////////////
|
||||
// New methods added
|
||||
///////////////////////////////////////////////////////////////
|
||||
|
||||
void DerivInternal(CartesianStencil & st,
|
||||
LatticeDoubledGaugeField & U,
|
||||
LatticeGaugeField &mat,
|
||||
const LatticeFermion &A,
|
||||
const LatticeFermion &B,
|
||||
int dag);
|
||||
|
||||
void DhopInternal(CartesianStencil & st,
|
||||
LebesgueOrder &lo,
|
||||
LatticeDoubledGaugeField &U,
|
||||
@ -80,6 +94,7 @@ namespace Grid {
|
||||
double _M5);
|
||||
|
||||
// DoubleStore
|
||||
virtual void ImportGauge(const LatticeGaugeField &_Umu);
|
||||
void DoubleStore(LatticeDoubledGaugeField &Uds,const LatticeGaugeField &Umu);
|
||||
|
||||
///////////////////////////////////////////////////////////////
|
||||
|
@ -294,8 +294,8 @@ void DiracOptDhopSiteDag(CartesianStencil &st,LatticeDoubledGaugeField &U,
|
||||
}
|
||||
|
||||
void DiracOptDhopDir(CartesianStencil &st,LatticeDoubledGaugeField &U,
|
||||
std::vector<vHalfSpinColourVector,alignedAllocator<vHalfSpinColourVector> > &buf,
|
||||
int sF,int sU,const LatticeFermion &in, LatticeFermion &out,int dirdisp)
|
||||
std::vector<vHalfSpinColourVector,alignedAllocator<vHalfSpinColourVector> > &buf,
|
||||
int sF,int sU,const LatticeFermion &in, LatticeFermion &out,int dir,int gamma)
|
||||
{
|
||||
vHalfSpinColourVector tmp;
|
||||
vHalfSpinColourVector chi;
|
||||
@ -304,13 +304,13 @@ void DiracOptDhopDir(CartesianStencil &st,LatticeDoubledGaugeField &U,
|
||||
int offset,local,perm, ptype;
|
||||
int ss=sF;
|
||||
|
||||
offset = st._offsets [dirdisp][ss];
|
||||
local = st._is_local[dirdisp][ss];
|
||||
perm = st._permute[dirdisp][ss];
|
||||
ptype = st._permute_type[dirdisp];
|
||||
offset = st._offsets [dir][ss];
|
||||
local = st._is_local[dir][ss];
|
||||
perm = st._permute[dir][ss];
|
||||
ptype = st._permute_type[dir];
|
||||
|
||||
// Xp
|
||||
if(dirdisp==Xp){
|
||||
if(gamma==Xp){
|
||||
if ( local && perm ) {
|
||||
spProjXp(tmp,in._odata[offset]);
|
||||
permute(chi,tmp,ptype);
|
||||
@ -319,12 +319,12 @@ void DiracOptDhopDir(CartesianStencil &st,LatticeDoubledGaugeField &U,
|
||||
} else {
|
||||
chi=buf[offset];
|
||||
}
|
||||
mult(&Uchi(),&U._odata[sU](Xp),&chi());
|
||||
mult(&Uchi(),&U._odata[sU](dir),&chi());
|
||||
spReconXp(result,Uchi);
|
||||
}
|
||||
|
||||
// Yp
|
||||
if ( dirdisp==Yp ){
|
||||
if ( gamma==Yp ){
|
||||
if ( local && perm ) {
|
||||
spProjYp(tmp,in._odata[offset]);
|
||||
permute(chi,tmp,ptype);
|
||||
@ -333,12 +333,12 @@ void DiracOptDhopDir(CartesianStencil &st,LatticeDoubledGaugeField &U,
|
||||
} else {
|
||||
chi=buf[offset];
|
||||
}
|
||||
mult(&Uchi(),&U._odata[sU](Yp),&chi());
|
||||
mult(&Uchi(),&U._odata[sU](dir),&chi());
|
||||
spReconYp(result,Uchi);
|
||||
}
|
||||
|
||||
// Zp
|
||||
if ( dirdisp ==Zp ){
|
||||
if ( gamma ==Zp ){
|
||||
if ( local && perm ) {
|
||||
spProjZp(tmp,in._odata[offset]);
|
||||
permute(chi,tmp,ptype);
|
||||
@ -347,12 +347,12 @@ void DiracOptDhopDir(CartesianStencil &st,LatticeDoubledGaugeField &U,
|
||||
} else {
|
||||
chi=buf[offset];
|
||||
}
|
||||
mult(&Uchi(),&U._odata[sU](Zp),&chi());
|
||||
mult(&Uchi(),&U._odata[sU](dir),&chi());
|
||||
spReconZp(result,Uchi);
|
||||
}
|
||||
|
||||
// Tp
|
||||
if ( dirdisp ==Tp ){
|
||||
if ( gamma ==Tp ){
|
||||
if ( local && perm ) {
|
||||
spProjTp(tmp,in._odata[offset]);
|
||||
permute(chi,tmp,ptype);
|
||||
@ -361,12 +361,12 @@ void DiracOptDhopDir(CartesianStencil &st,LatticeDoubledGaugeField &U,
|
||||
} else {
|
||||
chi=buf[offset];
|
||||
}
|
||||
mult(&Uchi(),&U._odata[sU](Tp),&chi());
|
||||
mult(&Uchi(),&U._odata[sU](dir),&chi());
|
||||
spReconTp(result,Uchi);
|
||||
}
|
||||
|
||||
// Xm
|
||||
if ( dirdisp==Xm ){
|
||||
if ( gamma==Xm ){
|
||||
if ( local && perm ) {
|
||||
spProjXm(tmp,in._odata[offset]);
|
||||
permute(chi,tmp,ptype);
|
||||
@ -375,12 +375,12 @@ void DiracOptDhopDir(CartesianStencil &st,LatticeDoubledGaugeField &U,
|
||||
} else {
|
||||
chi=buf[offset];
|
||||
}
|
||||
mult(&Uchi(),&U._odata[sU](Xm),&chi());
|
||||
mult(&Uchi(),&U._odata[sU](dir),&chi());
|
||||
spReconXm(result,Uchi);
|
||||
}
|
||||
|
||||
// Ym
|
||||
if ( dirdisp == Ym ){
|
||||
if ( gamma == Ym ){
|
||||
if ( local && perm ) {
|
||||
spProjYm(tmp,in._odata[offset]);
|
||||
permute(chi,tmp,ptype);
|
||||
@ -389,12 +389,12 @@ void DiracOptDhopDir(CartesianStencil &st,LatticeDoubledGaugeField &U,
|
||||
} else {
|
||||
chi=buf[offset];
|
||||
}
|
||||
mult(&Uchi(),&U._odata[sU](Ym),&chi());
|
||||
mult(&Uchi(),&U._odata[sU](dir),&chi());
|
||||
spReconYm(result,Uchi);
|
||||
}
|
||||
|
||||
// Zm
|
||||
if ( dirdisp == Zm ){
|
||||
if ( gamma == Zm ){
|
||||
if ( local && perm ) {
|
||||
spProjZm(tmp,in._odata[offset]);
|
||||
permute(chi,tmp,ptype);
|
||||
@ -403,12 +403,12 @@ void DiracOptDhopDir(CartesianStencil &st,LatticeDoubledGaugeField &U,
|
||||
} else {
|
||||
chi=buf[offset];
|
||||
}
|
||||
mult(&Uchi(),&U._odata[sU](Zm),&chi());
|
||||
mult(&Uchi(),&U._odata[sU](dir),&chi());
|
||||
spReconZm(result,Uchi);
|
||||
}
|
||||
|
||||
// Tm
|
||||
if ( dirdisp==Tm ) {
|
||||
if ( gamma==Tm ) {
|
||||
if ( local && perm ) {
|
||||
spProjTm(tmp,in._odata[offset]);
|
||||
permute(chi,tmp,ptype);
|
||||
@ -417,7 +417,7 @@ void DiracOptDhopDir(CartesianStencil &st,LatticeDoubledGaugeField &U,
|
||||
} else {
|
||||
chi=buf[offset];
|
||||
}
|
||||
mult(&Uchi(),&U._odata[sU](Tm),&chi());
|
||||
mult(&Uchi(),&U._odata[sU](dir),&chi());
|
||||
spReconTm(result,Uchi);
|
||||
}
|
||||
|
||||
|
@ -22,7 +22,7 @@ namespace Grid {
|
||||
int sF,int sU,const LatticeFermion &in, LatticeFermion &out);
|
||||
void DiracOptDhopDir(CartesianStencil &st,LatticeDoubledGaugeField &U,
|
||||
std::vector<vHalfSpinColourVector,alignedAllocator<vHalfSpinColourVector> > &buf,
|
||||
int sF,int sU,const LatticeFermion &in, LatticeFermion &out,int dirdisp);
|
||||
int sF,int sU,const LatticeFermion &in, LatticeFermion &out,int dirdisp,int gamma);
|
||||
|
||||
// };
|
||||
|
||||
|
@ -360,11 +360,11 @@ void DiracOptHandDhopSite(CartesianStencil &st,LatticeDoubledGaugeField &U,
|
||||
MULT_2SPIN(Xp);
|
||||
}
|
||||
XP_RECON;
|
||||
// std::cout << "XP_RECON"<<std::endl;
|
||||
// std::cout << result_00 <<" "<<result_01 <<" "<<result_02 <<std::endl;
|
||||
// std::cout << result_10 <<" "<<result_11 <<" "<<result_12 <<std::endl;
|
||||
// std::cout << result_20 <<" "<<result_21 <<" "<<result_22 <<std::endl;
|
||||
// std::cout << result_30 <<" "<<result_31 <<" "<<result_32 <<std::endl;
|
||||
// std::cout<<GridLogMessage << "XP_RECON"<<std::endl;
|
||||
// std::cout<<GridLogMessage << result_00 <<" "<<result_01 <<" "<<result_02 <<std::endl;
|
||||
// std::cout<<GridLogMessage << result_10 <<" "<<result_11 <<" "<<result_12 <<std::endl;
|
||||
// std::cout<<GridLogMessage << result_20 <<" "<<result_21 <<" "<<result_22 <<std::endl;
|
||||
// std::cout<<GridLogMessage << result_30 <<" "<<result_31 <<" "<<result_32 <<std::endl;
|
||||
|
||||
// Yp
|
||||
offset = st._offsets [Yp][ss];
|
||||
@ -446,11 +446,11 @@ void DiracOptHandDhopSite(CartesianStencil &st,LatticeDoubledGaugeField &U,
|
||||
MULT_2SPIN(Xm);
|
||||
}
|
||||
XM_RECON_ACCUM;
|
||||
// std::cout << "XM_RECON_ACCUM"<<std::endl;
|
||||
// std::cout << result_00 <<" "<<result_01 <<" "<<result_02 <<std::endl;
|
||||
// std::cout << result_10 <<" "<<result_11 <<" "<<result_12 <<std::endl;
|
||||
// std::cout << result_20 <<" "<<result_21 <<" "<<result_22 <<std::endl;
|
||||
// std::cout << result_30 <<" "<<result_31 <<" "<<result_32 <<std::endl;
|
||||
// std::cout<<GridLogMessage << "XM_RECON_ACCUM"<<std::endl;
|
||||
// std::cout<<GridLogMessage << result_00 <<" "<<result_01 <<" "<<result_02 <<std::endl;
|
||||
// std::cout<<GridLogMessage << result_10 <<" "<<result_11 <<" "<<result_12 <<std::endl;
|
||||
// std::cout<<GridLogMessage << result_20 <<" "<<result_21 <<" "<<result_22 <<std::endl;
|
||||
// std::cout<<GridLogMessage << result_30 <<" "<<result_31 <<" "<<result_32 <<std::endl;
|
||||
|
||||
|
||||
// Ym
|
||||
|
@ -18,9 +18,11 @@ namespace Grid{
|
||||
|
||||
virtual RealD S(const GaugeField &U) {
|
||||
RealD plaq = WilsonLoops<MatrixField,GaugeField>::avgPlaquette(U);
|
||||
std::cout << "Plaq : "<<plaq << "\n";
|
||||
double vol = U._grid->gSites();
|
||||
return beta*(1.0 -plaq)*(Nd*(Nd-1.0))*vol*0.5;
|
||||
std::cout<<GridLogMessage << "Plaq : "<<plaq << "\n";
|
||||
RealD vol = U._grid->gSites();
|
||||
RealD action=beta*(1.0 -plaq)*(Nd*(Nd-1.0))*vol*0.5;
|
||||
std::cout << GridLogMessage << "WilsonGauge action "<<action<<std::endl;
|
||||
return action;
|
||||
};
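A worked check of the normalisation used in S above, assuming avgPlaquette returns the plaquette averaged over all sites and over the $N_d(N_d-1)/2$ planes, normalised to one on a unit-gauge configuration:

\[
  S \;=\; \beta \sum_x \sum_{\mu<\nu}\Big( 1 - \tfrac{1}{N_c}\,\mathrm{Re}\,\mathrm{Tr}\,U_{\mu\nu}(x) \Big)
    \;=\; \beta\,\big(1 - \langle \mathrm{plaq}\rangle\big)\,\frac{N_d(N_d-1)}{2}\,V ,
\]

which matches the action value computed and logged above.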
|
||||
virtual void deriv(const GaugeField &U,GaugeField & dSdU) {
|
||||
//not optimal implementation FIXME
|
||||
|
209	lib/qcd/action/pseudofermion/TwoFlavour.h	Normal file
@ -0,0 +1,209 @@
|
||||
#ifndef QCD_PSEUDOFERMION_TWO_FLAVOUR_H
|
||||
#define QCD_PSEUDOFERMION_TWO_FLAVOUR_H
|
||||
|
||||
namespace Grid{
|
||||
namespace QCD{
|
||||
|
||||
// Placeholder comments:
|
||||
|
||||
///////////////////////////////////////
|
||||
// Two flavour ratio
|
||||
///////////////////////////////////////
|
||||
// S = phi^dag V (Mdag M)^-1 V^dag phi
|
||||
// dS/du = phi^dag dV (Mdag M)^-1 V^dag phi
|
||||
// - phi^dag V (Mdag M)^-1 [ Mdag dM + dMdag M ] (Mdag M)^-1 V^dag phi
|
||||
// + phi^dag V (Mdag M)^-1 dV^dag phi
|
||||
|
||||
///////////////////////////////////////
|
||||
// One flavour rational
|
||||
///////////////////////////////////////
|
||||
|
||||
// S_f = chi^dag * N(M^dag*M)/D(M^dag*M) * chi
|
||||
//
|
||||
// Here, M is some operator
|
||||
// N and D makeup the rat. poly
|
||||
//
|
||||
// Need
|
||||
// dS_f/dU = chi^dag P/Q d[N/D] P/Q chi
|
||||
//
|
||||
// Here N/D \sim R_{-1/2} ~ (M^dagM)^{-1/2}
|
||||
//
|
||||
// N/D is expressed as partial fraction expansion:
|
||||
//
|
||||
// a0 + \sum_k ak/(M^dagM + bk)
|
||||
//
|
||||
// d[N/D] is then
|
||||
//
|
||||
// \sum_k -ak [M^dagM+bk]^{-1} [ dM^dag M + M^dag dM ] [M^dag M + bk]^{-1}
|
||||
//
|
||||
// Need
|
||||
//
|
||||
// Mf Phi_k = [MdagM+bk]^{-1} Phi
|
||||
// Mf Phi = \sum_k ak [MdagM+bk]^{-1} Phi
|
||||
//
|
||||
// With these building blocks
|
||||
//
|
||||
// dS/dU = \sum_k -ak Mf Phi_k^dag [ dM^dag M + M^dag dM ] Mf Phi_k
|
||||
// S = innerprodReal(Phi,Mf Phi);
|
||||
|
||||
///////////////////////////////////////
|
||||
// One flavour rational ratio
|
||||
///////////////////////////////////////
|
||||
|
||||
// S_f = chi^dag* P(V^dag*V)/Q(V^dag*V)* N(M^dag*M)/D(M^dag*M)* P(V^dag*V)/Q(V^dag*V)* chi
|
||||
//
|
||||
// Here, M is some 5D operator and V is the Pauli-Villars field
|
||||
// N and D makeup the rat. poly of the M term and P and & makeup the rat.poly of the denom term
|
||||
//
|
||||
// Need
|
||||
// dS_f/dU = chi^dag d[P/Q] N/D P/Q chi
|
||||
// + chi^dag P/Q d[N/D] P/Q chi
|
||||
// + chi^dag P/Q N/D d[P/Q] chi
|
||||
//
|
||||
// Here P/Q \sim R_{1/4} ~ (V^dagV)^{1/4}
|
||||
// Here N/D \sim R_{-1/2} ~ (M^dagM)^{-1/2}
|
||||
//
|
||||
// P/Q is expressed as partial fraction expansion:
|
||||
//
|
||||
// a0 + \sum_k ak/(V^dagV + bk)
|
||||
//
|
||||
// d[P/Q] is then
|
||||
//
|
||||
// \sum_k -ak [V^dagV+bk]^{-1} [ dV^dag V + V^dag dV ] [V^dag V + bk]^{-1}
|
||||
//
|
||||
// and similar for N/D.
|
||||
//
|
||||
// Need
|
||||
// MpvPhi_k = [Vdag V + bk]^{-1} chi
|
||||
//
|
||||
// MpvPhi = {a0 + \sum_k ak [Vdag V + bk]^{-1} }chi
|
||||
//
|
||||
// MfMpvPhi_k = [MdagM+bk]^{-1} MpvPhi
|
||||
//
|
||||
// MfMpvPhi = {a0 + \sum_k ak [Mdag M + bk]^{-1} } MpvPhi
|
||||
//
|
||||
// MpvMfMpvPhi_k = [Vdag V + bk]^{-1} MfMpvchi
|
||||
//
|
||||
// With these building blocks
|
||||
//
|
||||
// dS/dU =
|
||||
// \sum_k -ak MpvPhi_k^dag [ dV^dag V + V^dag dV ] MpvMfMpvPhi_k <- deriv on P left
|
||||
// + \sum_k -ak MpvMfMpvPhi_k^\dag [ dV^dag V + V^dag dV ] MpvPhi_k
|
||||
// + \sum_k -ak MfMpvPhi_k^dag [ dM^dag M + M^dag dM ] MfMpvPhi_k
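Restating the rational pieces above in one place (symbols exactly as in the comments; nothing new assumed), the partial-fraction form and its variation are

\[
  \frac{N}{D}(M^\dagger M) \;=\; a_0 + \sum_k \frac{a_k}{M^\dagger M + b_k},
  \qquad
  d\!\left[\frac{N}{D}\right] \;=\; -\sum_k a_k\,(M^\dagger M + b_k)^{-1}
      \big[\, dM^\dagger M + M^\dagger dM \,\big]\,(M^\dagger M + b_k)^{-1},
\]

so each shift $b_k$ costs one solve $\Phi_k = (M^\dagger M + b_k)^{-1}\Phi$ (and likewise for the $V^\dagger V$ terms), which is exactly the list of building blocks enumerated above.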
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// Two flavour pseudofermion action for any dop
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
template<class GaugeField,class MatrixField,class FermionField>
|
||||
class TwoFlavourPseudoFermionAction : public Action<GaugeField> {
|
||||
|
||||
private:
|
||||
|
||||
FermionOperator<FermionField,GaugeField> & FermOp;// the basic operator
|
||||
|
||||
OperatorFunction<FermionField> &DerivativeSolver;
|
||||
|
||||
OperatorFunction<FermionField> &ActionSolver;
|
||||
|
||||
GridBase &Grid;
|
||||
|
||||
FermionField Phi; // the pseudo fermion field for this trajectory
|
||||
|
||||
public:
|
||||
/////////////////////////////////////////////////
|
||||
// Pass in required objects.
|
||||
/////////////////////////////////////////////////
|
||||
TwoFlavourPseudoFermionAction(FermionOperator<FermionField,GaugeField> &Op,
|
||||
OperatorFunction<FermionField> & DS,
|
||||
OperatorFunction<FermionField> & AS,
|
||||
GridBase &_Grid
|
||||
) : FermOp(Op), DerivativeSolver(DS), ActionSolver(AS), Phi(&_Grid), Grid(_Grid) {
|
||||
};
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////
|
||||
// Push the gauge field in to the dops. Assume any BC's and smearing already applied
|
||||
//////////////////////////////////////////////////////////////////////////////////////
|
||||
virtual void init(const GaugeField &U, GridParallelRNG& pRNG) {
|
||||
|
||||
// P(phi) = e^{- phi^dag (MdagM)^-1 phi}
|
||||
// Phi = Mdag eta
|
||||
// P(eta) = e^{- eta^dag eta}
|
||||
//
|
||||
// e^{x^2/2 sig^2} => sig^2 = 0.5.
|
||||
//
|
||||
// So eta should be of width sig = 1/sqrt(2).
|
||||
// and must multiply by 0.707....
|
||||
//
|
||||
// Chroma has this scale factor: two_flavor_monomial_w.h
|
||||
// IroIro: does not use this scale. It is absorbed by a change of vars
|
||||
// in the Phi integral, and thus is only an irrelevant prefactor for the partition function.
|
||||
//
|
||||
RealD scale = std::sqrt(0.5);
|
||||
FermionField eta(&Grid);
|
||||
|
||||
gaussian(pRNG,eta);
|
||||
|
||||
FermOp.Mdag(eta,Phi);
|
||||
|
||||
Phi=Phi*scale;
|
||||
|
||||
};
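The 0.707... factor follows from the Gaussian normalisation quoted in the comment, assuming gaussian(pRNG,eta) draws unit-variance components:

\[
  P(\eta) \propto e^{-\eta^\dagger \eta} = e^{-|\eta|^2/(2\sigma^2)}\ \ \text{with}\ \ \sigma^2 = \tfrac{1}{2}
  \quad\Longrightarrow\quad \eta = \tfrac{1}{\sqrt{2}}\,\eta_{\rm unit},
\]

hence the multiplication of $\Phi = M^\dagger\eta$ by $\sqrt{0.5}$.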
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
// S = phi^dag (Mdag M)^-1 phi
|
||||
//////////////////////////////////////////////////////
|
||||
virtual RealD S(const GaugeField &U) {
|
||||
|
||||
FermOp.ImportGauge(U);
|
||||
|
||||
FermionField X(&Grid);
|
||||
FermionField Y(&Grid);
|
||||
|
||||
MdagMLinearOperator<FermionOperator<FermionField,GaugeField> ,FermionField> MdagMOp(FermOp);
|
||||
X=zero;
|
||||
ActionSolver(MdagMOp,Phi,X);
|
||||
MdagMOp.Op(X,Y);
|
||||
|
||||
RealD action = norm2(Y);
|
||||
std::cout << GridLogMessage << "Pseudofermion action "<<action<<std::endl;
|
||||
return action;
|
||||
};
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
// dS/du = - phi^dag (Mdag M)^-1 [ Mdag dM + dMdag M ] (Mdag M)^-1 phi
|
||||
// = - phi^dag M^-1 dM (MdagM)^-1 phi - phi^dag (MdagM)^-1 dMdag dM (Mdag)^-1 phi
|
||||
//
|
||||
// = - Ydag dM X - Xdag dMdag Y
|
||||
//
|
||||
//////////////////////////////////////////////////////
|
||||
virtual void deriv(const GaugeField &U,GaugeField & dSdU) {
|
||||
|
||||
FermOp.ImportGauge(U);
|
||||
|
||||
FermionField X(&Grid);
|
||||
FermionField Y(&Grid);
|
||||
GaugeField tmp(&Grid);
|
||||
|
||||
MdagMLinearOperator<FermionOperator<FermionField,GaugeField> ,FermionField> MdagMOp(FermOp);
|
||||
|
||||
X=zero;
|
||||
DerivativeSolver(MdagMOp,Phi,X);
|
||||
MdagMOp.Op(X,Y);
|
||||
|
||||
// Our conventions really make this UdSdU; We do not differentiate wrt Udag here.
|
||||
// So must take dSdU - adj(dSdU) and left multiply by mom to get dS/dt.
|
||||
|
||||
FermOp.MDeriv(tmp , Y, X,DaggerNo ); dSdU=tmp;
|
||||
FermOp.MDeriv(tmp , X, Y,DaggerYes); dSdU=dSdU+tmp;
|
||||
|
||||
dSdU = Ta(dSdU);
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
#endif
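A minimal wiring sketch for this action inside an HMC setup; it only mirrors the shape of the WilsonFermion HMC test, and the grid objects, gauge field U, RNG and the mass value are assumptions for illustration rather than code taken from the repository:

    // Illustrative only: two-flavour Wilson pseudofermion plugged into an HMC action level.
    RealD mass = -0.77;                                         // assumed value
    WilsonFermion FermOp(U,*UGrid,*UrbGrid,mass);               // a FermionOperator implementation
    ConjugateGradient<LatticeFermion> CG(1.0e-8,10000);         // used for both S and deriv solves

    TwoFlavourPseudoFermionAction<LatticeGaugeField,LatticeColourMatrix,LatticeFermion>
      PseudoFermion(FermOp,CG,CG,*UGrid);

    PseudoFermion.init(U,pRNG);            // Phi = Mdag eta, eta Gaussian with sigma^2 = 1/2
    RealD S = PseudoFermion.S(U);          // phi^dag (Mdag M)^-1 phi
    LatticeGaugeField dSdU(UGrid);
    PseudoFermion.deriv(U,dSdU);           // Ta(U dS/dU), ready for the momentum update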
|
@ -7,8 +7,8 @@ namespace Grid{
|
||||
// FIXME fill this constructor now just default values
|
||||
|
||||
////////////////////////////// Default values
|
||||
Nsweeps = 100;
|
||||
TotalSweeps = 20;
|
||||
Nsweeps = 200;
|
||||
TotalSweeps = 220;
|
||||
ThermalizationSteps = 20;
|
||||
StartingConfig = 0;
|
||||
SaveInterval = 1;
|
||||
@ -17,8 +17,5 @@ namespace Grid{
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
|
@ -3,7 +3,7 @@
|
||||
* @brief Classes for Hybrid Monte Carlo update
|
||||
*
|
||||
* @author Guido Cossu
|
||||
* Time-stamp: <2015-07-07 14:58:13 neo>
|
||||
* Time-stamp: <2015-07-30 16:58:26 neo>
|
||||
*/
|
||||
//--------------------------------------------------------------------
|
||||
#ifndef HMC_INCLUDED
|
||||
@ -28,75 +28,89 @@ namespace Grid{
|
||||
|
||||
template <class Algorithm>
|
||||
class HybridMonteCarlo{
|
||||
|
||||
const HMCparameters Params;
|
||||
GridSerialRNG sRNG;
|
||||
|
||||
GridSerialRNG sRNG; // Fixme: need a RNG management strategy.
|
||||
|
||||
Integrator<Algorithm>& MD;
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////
|
||||
// Metropolis step
|
||||
/////////////////////////////////////////////////////////
|
||||
bool metropolis_test(const RealD DeltaH){
|
||||
|
||||
RealD rn_test;
|
||||
|
||||
RealD prob = std::exp(-DeltaH);
|
||||
|
||||
random(sRNG,rn_test);
|
||||
|
||||
std::cout<< "--------------------------------------------\n";
|
||||
std::cout<< "dH = "<<DeltaH << " Random = "<< rn_test
|
||||
<< "\nAcc. Probability = " << ((prob<1.0)? prob: 1.0)<< " ";
|
||||
std::cout<<GridLogMessage<< "--------------------------------------------\n";
|
||||
std::cout<<GridLogMessage<< "dH = "<<DeltaH << " Random = "<< rn_test <<"\n";
|
||||
std::cout<<GridLogMessage<< "Acc. Probability = " << ((prob<1.0)? prob: 1.0)<< " ";
|
||||
|
||||
if((prob >1.0) || (rn_test <= prob)){ // accepted
|
||||
std::cout <<"-- ACCEPTED\n";
|
||||
std::cout<<GridLogMessage <<"-- ACCEPTED\n";
|
||||
return true;
|
||||
} else { // rejected
|
||||
std::cout <<"-- REJECTED\n";
|
||||
std::cout<<GridLogMessage <<"-- REJECTED\n";
|
||||
return false;
|
||||
}
|
||||
|
||||
}
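In formula form the test above is the usual Metropolis accept/reject step, assuming random(sRNG,rn_test) yields a uniform deviate in $[0,1]$:

\[
  P_{\rm acc} = \min\!\big(1,\ e^{-\Delta H}\big), \qquad \text{accept iff } r \le P_{\rm acc}.
\]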
|
||||
|
||||
/////////////////////////////////////////////////////////
|
||||
// Evolution
|
||||
/////////////////////////////////////////////////////////
|
||||
RealD evolve_step(LatticeGaugeField& U){
|
||||
MD.init(U); // set U and initialize P and phi's
|
||||
|
||||
RealD H0 = MD.S(U); // initial state action
|
||||
std::cout<<"Total H before = "<< H0 << "\n";
|
||||
|
||||
std::cout<<GridLogMessage<<"Total H before = "<< H0 << "\n";
|
||||
|
||||
MD.integrate(U);
|
||||
|
||||
RealD H1 = MD.S(U); // updated state action
|
||||
std::cout<<"Total H after = "<< H1 << "\n";
|
||||
|
||||
std::cout<<GridLogMessage<<"Total H after = "<< H1 << "\n";
|
||||
return (H1-H0);
|
||||
}
|
||||
|
||||
public:
|
||||
HybridMonteCarlo(HMCparameters Pms,
|
||||
Integrator<Algorithm>& MolDyn):
|
||||
Params(Pms),MD(MolDyn){
|
||||
//FIXME
|
||||
|
||||
// initialize RNGs also with seed
|
||||
/////////////////////////////////////////
|
||||
// Constructor
|
||||
/////////////////////////////////////////
|
||||
HybridMonteCarlo(HMCparameters Pms, Integrator<Algorithm>& MolDyn): Params(Pms),MD(MolDyn) {
|
||||
|
||||
//FIXME... initialize RNGs also with seed ; RNG management strategy
|
||||
sRNG.SeedRandomDevice();
|
||||
|
||||
}
|
||||
~HybridMonteCarlo(){};
|
||||
|
||||
|
||||
void evolve(LatticeGaugeField& Uin){
|
||||
Real DeltaH;
|
||||
|
||||
// Thermalizations
|
||||
for(int iter=1; iter <= Params.ThermalizationSteps; ++iter){
|
||||
std::cout << "-- # Thermalization step = "<< iter << "\n";
|
||||
std::cout<<GridLogMessage << "-- # Thermalization step = "<< iter << "\n";
|
||||
|
||||
DeltaH = evolve_step(Uin);
|
||||
std::cout<< " dH = "<< DeltaH << "\n";
|
||||
std::cout<<GridLogMessage<< "dH = "<< DeltaH << "\n";
|
||||
}
|
||||
|
||||
// Actual updates (evolve a copy Ucopy then copy back eventually)
|
||||
LatticeGaugeField Ucopy(Uin._grid);
|
||||
for(int iter=Params.StartingConfig;
|
||||
iter < Params.Nsweeps+Params.StartingConfig; ++iter){
|
||||
std::cout << "-- # Sweep = "<< iter << "\n";
|
||||
std::cout<<GridLogMessage << "-- # Sweep = "<< iter << "\n";
|
||||
|
||||
Ucopy = Uin;
|
||||
DeltaH = evolve_step(Ucopy);
|
||||
|
||||
if(metropolis_test(DeltaH)) Uin = Ucopy;
|
||||
|
||||
// here save config and RNG seed
|
||||
}
|
||||
}
|
||||
};
|
||||
|
@ -3,7 +3,7 @@
|
||||
* @brief Classes for the Molecular Dynamics integrator
|
||||
*
|
||||
* @author Guido Cossu
|
||||
* Time-stamp: <2015-07-07 14:58:40 neo>
|
||||
* Time-stamp: <2015-07-30 16:21:29 neo>
|
||||
*/
|
||||
//--------------------------------------------------------------------
|
||||
|
||||
@ -71,7 +71,6 @@ namespace Grid{
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void update_U(LatticeGaugeField&U, double ep){
|
||||
//rewrite exponential to deal automatically with the lorentz index?
|
||||
LatticeColourMatrix Umu(U._grid);
|
||||
@ -85,7 +84,6 @@ namespace Grid{
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
friend void IntegratorAlgorithm::step (LatticeGaugeField& U,
|
||||
int level, std::vector<int>& clock,
|
||||
@ -99,11 +97,9 @@ namespace Grid{
|
||||
|
||||
~Integrator(){}
|
||||
|
||||
|
||||
//Initialization of momenta and actions
|
||||
void init(LatticeGaugeField& U){
|
||||
std::cout<< "Integrator init\n";
|
||||
|
||||
std::cout<<GridLogMessage<< "Integrator init\n";
|
||||
MDutils::generate_momenta(*P,pRNG);
|
||||
for(int level=0; level< as.size(); ++level){
|
||||
for(int actionID=0; actionID<as[level].actions.size(); ++actionID){
|
||||
@ -112,7 +108,6 @@ namespace Grid{
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Calculate action
|
||||
RealD S(LatticeGaugeField& U){
|
||||
LatticeComplex Hloc(U._grid);
|
||||
@ -126,12 +121,14 @@ namespace Grid{
|
||||
|
||||
RealD H = Hsum.real();
|
||||
|
||||
std::cout << "H_p = "<< H << "\n";
|
||||
std::cout<<GridLogMessage << "Momentum action H_p = "<< H << "\n";
|
||||
|
||||
// Actions
|
||||
for(int level=0; level<as.size(); ++level)
|
||||
for(int actionID=0; actionID<as[level].actions.size(); ++actionID)
|
||||
H += as[level].actions.at(actionID)->S(U);
|
||||
|
||||
std::cout<<GridLogMessage << "Total action H = "<< H << "\n";
|
||||
|
||||
return H;
|
||||
}
|
||||
|
@ -33,35 +33,32 @@ namespace Grid{
|
||||
for(int l=1; l<=level; ++l) fin*= 2.0*Integ->as[l].multiplier;
|
||||
fin = 3*Integ->Params.MDsteps*fin -1;
|
||||
|
||||
|
||||
for(int e=0; e<Integ->as[level].multiplier; ++e){
|
||||
|
||||
if(clock[level] == 0){ // initial half step
|
||||
Integ->update_P(U,level,lambda*eps);
|
||||
++clock[level];
|
||||
for(int l=0; l<level;++l) std::cout<<" ";
|
||||
std::cout<<"P "<< clock[level] <<std::endl;
|
||||
for(int l=0; l<level;++l) std::cout<<GridLogMessage<<" ";
|
||||
std::cout<<GridLogMessage<<"P "<< clock[level] <<std::endl;
|
||||
}
|
||||
|
||||
if(level == fl){ // lowest level
|
||||
Integ->update_U(U,0.5*eps);
|
||||
|
||||
for(int l=0; l<level;++l) std::cout<<" ";
|
||||
std::cout<<"U "<< (clock[level]+1) <<std::endl;
|
||||
for(int l=0; l<level;++l) std::cout<<GridLogMessage<<" ";
|
||||
std::cout<<GridLogMessage<<"U "<< (clock[level]+1) <<std::endl;
|
||||
}else{ // recursive function call
|
||||
step(U,level+1,clock, Integ);
|
||||
}
|
||||
|
||||
Integ->update_P(U,level,(1.0-2.0*lambda)*eps);
|
||||
++clock[level];
|
||||
for(int l=0; l<level;++l) std::cout<<" ";
|
||||
std::cout<<"P "<< (clock[level]) <<std::endl;
|
||||
for(int l=0; l<level;++l) std::cout<<GridLogMessage<<" ";
|
||||
std::cout<<GridLogMessage<<"P "<< (clock[level]) <<std::endl;
|
||||
|
||||
if(level == fl){ // lowest level
|
||||
Integ->update_U(U,0.5*eps);
|
||||
|
||||
for(int l=0; l<level;++l) std::cout<<" ";
|
||||
std::cout<<"U "<< (clock[level]+1) <<std::endl;
|
||||
for(int l=0; l<level;++l) std::cout<<GridLogMessage<<" ";
|
||||
std::cout<<GridLogMessage<<"U "<< (clock[level]+1) <<std::endl;
|
||||
}else{ // recursive function call
|
||||
step(U,level+1,clock, Integ);
|
||||
}
|
||||
@ -71,19 +68,17 @@ namespace Grid{
|
||||
Integ->update_P(U,level,lambda*eps);
|
||||
|
||||
++clock[level];
|
||||
for(int l=0; l<level;++l) std::cout<<" ";
|
||||
std::cout<<"P "<< clock[level] <<std::endl;
|
||||
for(int l=0; l<level;++l) std::cout<<GridLogMessage<<" ";
|
||||
std::cout<<GridLogMessage<<"P "<< clock[level] <<std::endl;
|
||||
}else{ // bulk step
|
||||
Integ->update_P(U,level,lambda*2.0*eps);
|
||||
|
||||
clock[level]+=2;
|
||||
for(int l=0; l<level;++l) std::cout<<" ";
|
||||
std::cout<<"P "<< clock[level] <<std::endl;
|
||||
for(int l=0; l<level;++l) std::cout<<GridLogMessage<<" ";
|
||||
std::cout<<GridLogMessage<<"P "<< clock[level] <<std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
};
|
||||
@ -93,6 +88,7 @@ namespace Grid{
|
||||
void step (LatticeLorentzColourMatrix& U,
|
||||
int level, std::vector<int>& clock,
|
||||
Integrator<LeapFrog>* Integ){
|
||||
|
||||
// level : current level
|
||||
// fl : final level
|
||||
// eps : current step size
|
||||
@ -112,34 +108,32 @@ namespace Grid{

if(clock[level] == 0){ // initial half step
Integ->update_P(U, level,eps/2.0);
++clock[level];
for(int l=0; l<level;++l) std::cout<<" ";
std::cout<<"P "<< 0.5*clock[level] <<std::endl;
for(int l=0; l<level;++l) std::cout<<GridLogMessage<<" ";
std::cout<<GridLogMessage<<"P "<< 0.5*clock[level] <<std::endl;
}

if(level == fl){ // lowest level
Integ->update_U(U, eps);
for(int l=0; l<level;++l) std::cout<<" ";
std::cout<<"U "<< 0.5*(clock[level]+1) <<std::endl;
for(int l=0; l<level;++l) std::cout<<GridLogMessage<<" ";
std::cout<<GridLogMessage<<"U "<< 0.5*(clock[level]+1) <<std::endl;
}else{ // recursive function call
step(U, level+1,clock, Integ);
}

if(clock[level] == fin){ // final half step
Integ->update_P(U, level,eps/2.0);

++clock[level];
for(int l=0; l<level;++l) std::cout<<" ";
std::cout<<"P "<< 0.5*clock[level] <<std::endl;
for(int l=0; l<level;++l) std::cout<<GridLogMessage<<" ";
std::cout<<GridLogMessage<<"P "<< 0.5*clock[level] <<std::endl;
}else{ // bulk step
Integ->update_P(U, level,eps);

clock[level]+=2;
for(int l=0; l<level;++l) std::cout<<" ";
std::cout<<"P "<< 0.5*clock[level] <<std::endl;
for(int l=0; l<level;++l) std::cout<<GridLogMessage<<" ";
std::cout<<GridLogMessage<<"P "<< 0.5*clock[level] <<std::endl;
}

}

}
};
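For the LeapFrog specialisation the same recursion realises the plain PQP splitting; one elementary step is, schematically (sign and normalisation conventions as in update_P/update_U):

    e^{\frac{\epsilon}{2}\hat F}\; e^{\epsilon \hat T}\; e^{\frac{\epsilon}{2}\hat F}

Half-kicks of neighbouring elementary steps are merged in the bulk-step branch, which is why the clock is reported in units of half steps (the 0.5*clock factors in the messages above).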
@ -65,6 +65,18 @@ namespace Grid{

for(int a=0; a<as[level].size(); ++a){
LatticeLorentzColourMatrix force(U._grid);
as[level].at(a)->deriv(U,force);

Complex dSdt=0.0;
for(int mu=0;mu<Nd;mu++){
LatticeColourMatrix forcemu(U._grid);
LatticeColourMatrix mommu(U._grid);
forcemu=PeekIndex<LorentzIndex>(force,mu);
mommu=PeekIndex<LorentzIndex>(*P,mu);

dSdt += sum(trace(forcemu*(*P)));

}
std::cout << GridLogMessage << " action "<<level<<","<<a<<" dSdt "<< dSdt << " dt "<<ep <<std::endl;
*P -= force*ep;
}
}
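The per-action diagnostic added to update_P accumulates a proxy for the rate of change of the action along the trajectory; judging from the per-direction peeks of force and momentum, the intended quantity is (a reading of the code, not text from the commit)

    \frac{dS}{d\tau} \;\sim\; \sum_{\mu} \mathrm{tr}\,\big(F_\mu P_\mu\big)

which is useful for checking the force balance level by level against the step size ep.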
@ -101,7 +113,7 @@ namespace Grid{

//Initialization of momenta and actions
void init(LatticeLorentzColourMatrix& U,
GridParallelRNG& pRNG){
std::cout<< "Integrator init\n";
std::cout<<GridLogMessage<< "Integrator init\n";
if (!P)
P = new LatticeLorentzColourMatrix(U._grid);
MDutils::generate_momenta(*P,pRNG);
@ -172,13 +184,13 @@ namespace Grid{

if(clock[level] == 0){ // initial half step
Integ->update_P(U, level,eps/2);
++clock[level];
for(int l=0; l<level;++l) std::cout<<" ";
std::cout<<"P "<< 0.5*clock[level] <<std::endl;
for(int l=0; l<level;++l) std::cout<<GridLogMessage<<" ";
std::cout<<GridLogMessage<<"P "<< 0.5*clock[level] <<std::endl;
}
if(level == fl){ // lowest level
Integ->update_U(U, eps);
for(int l=0; l<level;++l) std::cout<<" ";
std::cout<<"U "<< 0.5*(clock[level]+1) <<std::endl;
for(int l=0; l<level;++l) std::cout<<GridLogMessage<<" ";
std::cout<<GridLogMessage<<"U "<< 0.5*(clock[level]+1) <<std::endl;
}else{ // recursive function call
step(U, level+1,clock, Integ);
}
@ -186,14 +198,14 @@ namespace Grid{

Integ->update_P(U, level,eps/2);

++clock[level];
for(int l=0; l<level;++l) std::cout<<" ";
std::cout<<"P "<< 0.5*clock[level] <<std::endl;
for(int l=0; l<level;++l) std::cout<<GridLogMessage<<" ";
std::cout<<GridLogMessage<<"P "<< 0.5*clock[level] <<std::endl;
}else{ // bulk step
Integ->update_P(U, level,eps);

clock[level]+=2;
for(int l=0; l<level;++l) std::cout<<" ";
std::cout<<"P "<< 0.5*clock[level] <<std::endl;
for(int l=0; l<level;++l) std::cout<<GridLogMessage<<" ";
std::cout<<GridLogMessage<<"P "<< 0.5*clock[level] <<std::endl;
}
}
@ -372,7 +372,7 @@ Note that in step D setting B ~ X - A and using B in place of A in step E will g

LatticeReal d(grid); d=zero;
LatticeReal alpha(grid);

// std::cout<<"xi "<<xi <<std::endl;
// std::cout<<GridLogMessage<<"xi "<<xi <<std::endl;
alpha = toReal(2.0*xi);

do {
@ -468,11 +468,11 @@ Note that in step D setting B ~ X - A and using B in place of A in step E will g

LatticeMatrix Vcheck(grid);
Vcheck = zero;
Vcheck = where(Accepted,V*adj(V) - 1.0,Vcheck);
// std::cout << "SU3 check " <<norm2(Vcheck)<<std::endl;
// std::cout<<GridLogMessage << "SU3 check " <<norm2(Vcheck)<<std::endl;
assert(norm2(Vcheck)<1.0e-4);

// Verify the link stays in SU(3)
// std::cout <<"Checking the modified link"<<std::endl;
// std::cout<<GridLogMessage <<"Checking the modified link"<<std::endl;
Vcheck = link*adj(link) - 1.0;
assert(norm2(Vcheck)<1.0e-4);
/////////////////////////////////
@ -483,42 +483,42 @@ Note that in step D setting B ~ X - A and using B in place of A in step E will g

for(int gen=0;gen<generators();gen++){
Matrix ta;
generator(gen,ta);
std::cout<< "Nc = "<<ncolour<<" t_"<<gen<<std::endl;
std::cout<<ta<<std::endl;
std::cout<<GridLogMessage<< "Nc = "<<ncolour<<" t_"<<gen<<std::endl;
std::cout<<GridLogMessage<<ta<<std::endl;
}
}

static void testGenerators(void){
Matrix ta;
Matrix tb;
std::cout<<"Checking trace ta tb is 0.5 delta_ab"<<std::endl;
std::cout<<GridLogMessage<<"Checking trace ta tb is 0.5 delta_ab"<<std::endl;
for(int a=0;a<generators();a++){
for(int b=0;b<generators();b++){
generator(a,ta);
generator(b,tb);
Complex tr =TensorRemove(trace(ta*tb));
std::cout<<tr<<" ";
std::cout<<GridLogMessage<<tr<<" ";
if(a==b) assert(abs(tr-Complex(0.5))<1.0e-6);
if(a!=b) assert(abs(tr)<1.0e-6);
}
std::cout<<std::endl;
std::cout<<GridLogMessage<<std::endl;
}
std::cout<<"Checking hermitian"<<std::endl;
std::cout<<GridLogMessage<<"Checking hermitian"<<std::endl;
for(int a=0;a<generators();a++){
generator(a,ta);
std::cout<<a<<" ";
std::cout<<GridLogMessage<<a<<" ";
assert(norm2(ta-adj(ta))<1.0e-6);
}
std::cout<<std::endl;
std::cout<<GridLogMessage<<std::endl;

std::cout<<"Checking traceless"<<std::endl;
std::cout<<GridLogMessage<<"Checking traceless"<<std::endl;
for(int a=0;a<generators();a++){
generator(a,ta);
Complex tr =TensorRemove(trace(ta));
std::cout<<a<<" ";
std::cout<<GridLogMessage<<a<<" ";
assert(abs(tr)<1.0e-6);
}
std::cout<<std::endl;
std::cout<<GridLogMessage<<std::endl;
}

// reunitarise??
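For reference, the conditions testGenerators asserts are the standard su(N) fundamental-representation conventions, checked to the 10^{-6} tolerances used in the asserts above:

    \mathrm{tr}\,(T^a T^b) = \tfrac{1}{2}\,\delta^{ab}, \qquad T^a = (T^a)^\dagger, \qquad \mathrm{tr}\,T^a = 0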
@ -554,9 +554,7 @@ Note that in step D setting B ~ X - A and using B in place of A in step E will g

for(int a=0;a<generators();a++){
gaussian(pRNG,ca);
generator(a,ta);

la=toComplex(ca)*ci*ta;

out += la;
}
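This loop builds a Gaussian element of the Lie algebra, the building block of the HMC momenta; in formula form (a reading of the loop above, with ci the imaginary factor defined earlier in the routine):

    \mathrm{out} \;=\; \sum_a i\, c_a\, T^a, \qquad c_a \sim \mathcal{N}(0,\sigma^2)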
@ -414,7 +414,7 @@ namespace Grid {

template<class S, class V >
inline Grid_simd< S, V> outerProduct(const Grid_simd< S, V> &l, const Grid_simd< S, V> & r)
{
return l*r;
return l*conjugate(r);
}

template<class S, class V >
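Replacing return l*r by return l*conjugate(r) (and l*conj(r) in the scalar overloads further down) fixes the outer product to the sesquilinear convention

    (u \otimes v)_{ij} = u_i\, v_j^{\,*}

so that outerProduct(x,x) reproduces |x|^2 on the diagonal.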
@ -1 +1 @@

timestamp for lib/GridConfig.h
timestamp for lib/Config.h
@ -7,11 +7,12 @@ namespace Grid {

///////////////////////////////////////////////
// Ta function for scalar, vector, matrix
///////////////////////////////////////////////
/*
inline ComplexF Ta( const ComplexF &arg){ return arg;}
inline ComplexD Ta( const ComplexD &arg){ return arg;}
inline RealF Ta( const RealF &arg){ return arg;}
inline RealD Ta( const RealD &arg){ return arg;}
*/

template<class vtype> inline iScalar<vtype> Ta(const iScalar<vtype>&r)
{

@ -29,10 +30,11 @@ namespace Grid {

}
template<class vtype,int N> inline iMatrix<vtype,N> Ta(const iMatrix<vtype,N> &arg)
{
iMatrix<vtype,N> ret(arg);
double factor = (1/(double)N);
ret = (ret - adj(arg))*0.5;
ret -= trace(ret)*factor;
iMatrix<vtype,N> ret;

double factor = (1.0/(double)N);
ret= (arg - adj(arg))*0.5;
ret=ret - (trace(ret)*factor);
return ret;
}
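Both the old and the rewritten body implement the traceless anti-hermitian projection used for forces and momenta,

    \mathrm{Ta}(M) \;=\; \tfrac{1}{2}\,(M - M^\dagger) \;-\; \frac{1}{N}\,\mathrm{tr}\!\left[\tfrac{1}{2}(M - M^\dagger)\right]\mathbb{1}

the rewrite apparently just avoids constructing ret from arg and then overwriting it, and spells the normalisation explicitly as 1.0/(double)N.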
@ -57,11 +57,11 @@ inline void extract(typename std::enable_if<!isGridTensor<vsimd>::value, const v

extracted[i]=buf[i*s];
for(int ii=1;ii<s;ii++){
if ( buf[i*s]!=buf[i*s+ii] ){
std::cout << " SIMD extract failure splat = "<<s<<" ii "<<ii<<" " <<Nextr<<" "<< Nsimd<<" "<<std::endl;
std::cout<<GridLogMessage << " SIMD extract failure splat = "<<s<<" ii "<<ii<<" " <<Nextr<<" "<< Nsimd<<" "<<std::endl;
for(int vv=0;vv<Nsimd;vv++) {
std::cout<< buf[vv]<<" ";
std::cout<<GridLogMessage<< buf[vv]<<" ";
}
std::cout<<std::endl;
std::cout<<GridLogMessage<<std::endl;
assert(0);
}
assert(buf[i*s]==buf[i*s+ii]);
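The assert being instrumented here protects the splat invariant: when fewer logical values than SIMD lanes are extracted, each value must be replicated across a block of s = Nsimd/Nextr consecutive lanes. A minimal standalone illustration of that invariant (plain C++ with made-up lane counts, not Grid code):

#include <cassert>
#include <vector>

int main() {
  const int Nsimd = 8, Nextr = 4;      // lanes and logical values (illustrative numbers)
  const int s = Nsimd / Nextr;         // each value is splatted across s lanes
  std::vector<double> buf = {1,1, 2,2, 3,3, 4,4};  // lane buffer after a splat
  std::vector<double> extracted(Nextr);
  for (int i = 0; i < Nextr; ++i) {
    extracted[i] = buf[i*s];           // take the first lane of each block
    for (int ii = 1; ii < s; ++ii)
      assert(buf[i*s] == buf[i*s+ii]); // all lanes in the block must agree
  }
  return 0;
}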
@ -28,11 +28,13 @@ auto outerProduct (const iScalar<l>& lhs,const iScalar<r>& rhs) -> iScalar<declt

inline ComplexF outerProduct(const ComplexF &l, const ComplexF& r)
{
return l*r;
std::cout << "outer product taking conj "<<r<<" "<<conj(r)<<std::endl;
return l*conj(r);
}
inline ComplexD outerProduct(const ComplexD &l, const ComplexD& r)
{
return l*r;
std::cout << "outer product taking conj "<<r<<" "<<conj(r)<<std::endl;
return l*conj(r);
}
inline RealF outerProduct(const RealF &l, const RealF& r)
{
@ -31,6 +31,16 @@ inline auto trace(const iScalar<vtype> &arg) -> iScalar<decltype(trace(arg._inte

return ret;
}

template<class vtype,int N>
inline auto trace(const iVector<vtype,N> &arg) -> iVector<decltype(trace(arg._internal[0])),N>
{
iVector<decltype(trace(arg._internal[0])),N> ret;
for(int i=0;i<N;i++){
ret._internal[i]=trace(arg._internal[i]);
}
return ret;
}

}
#endif
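The new overload lets trace act component by component through an iVector,

    \mathrm{trace}(v)_i = \mathrm{trace}(v_i), \qquad i = 0,\dots,N-1

so that, for instance, tracing a Lorentz vector of colour matrices returns a Lorentz vector of scalars; the integrator diagnostics added earlier in this commit rely on expressions of this shape.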