Mirror of https://github.com/paboyle/Grid.git (synced 2025-11-15 19:19:31 +00:00)

Compare commits: specflow ... 3737a24096 (13 commits)

| SHA1 |
|---|
| 3737a24096 |
| d418f78352 |
| 25163998a0 |
| dc546aaa4b |
| 5364d580c9 |
| 2a9a6347e3 |
| cfdb56f314 |
| b517e88db3 |
| bb317aba8d |
| 644cc6647e |
| 72397ce23b |
| d60a80c098 |
| bb8b6d9d73 |
@@ -73,7 +73,6 @@ NAMESPACE_CHECK(BiCGSTAB);
#include <Grid/algorithms/iterative/FlexibleCommunicationAvoidingGeneralisedMinimalResidual.h>
#include <Grid/algorithms/iterative/MixedPrecisionFlexibleGeneralisedMinimalResidual.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
#include <Grid/algorithms/iterative/SimpleLanczos.h>
#include <Grid/algorithms/iterative/PowerMethod.h>
#include <Grid/algorithms/iterative/AdefGeneric.h>
#include <Grid/algorithms/iterative/AdefMrhs.h>

@@ -269,7 +269,9 @@ public:
RealD xscale = 2.0/(hi-lo);
RealD mscale = -(hi+lo)/(hi-lo);
Linop.HermOp(T0,y);
grid->Barrier();
axpby(T1,xscale,mscale,y,in);
grid->Barrier();

// sum = .5 c[0] T0 + c[1] T1
// out = ()*T0 + Coeffs[1]*T1;

@@ -1,931 +0,0 @@
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
Source file: ./lib/algorithms/iterative/ImplicitlyRestartedLanczos.h
|
||||
|
||||
Copyright (C) 2015
|
||||
|
||||
Author: Chulwoo Jung <chulwoo@bnl.gov>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
See the full license in the file "LICENSE" in the top level distribution directory
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
#ifndef GRID_LANC_H
|
||||
#define GRID_LANC_H
|
||||
|
||||
#include <string.h> //memset
|
||||
|
||||
#ifdef USE_LAPACK
|
||||
#ifdef USE_MKL
|
||||
#include<mkl_lapack.h>
|
||||
#else
|
||||
void LAPACK_dstegr (char *jobz, char *range, int *n, double *d, double *e,
|
||||
double *vl, double *vu, int *il, int *iu, double *abstol,
|
||||
int *m, double *w, double *z, int *ldz, int *isuppz,
|
||||
double *work, int *lwork, int *iwork, int *liwork,
|
||||
int *info);
|
||||
//#include <lapacke/lapacke.h>
|
||||
#endif
|
||||
#endif
|
||||
|
||||
//#include <Grid/algorithms/densematrix/DenseMatrix.h>
|
||||
|
||||
// eliminate temorary vector in calc()
|
||||
#define MEM_SAVE
|
||||
|
||||
namespace Grid
|
||||
{
|
||||
|
||||
struct Bisection
|
||||
{
|
||||
|
||||
#if 0
|
||||
static void get_eig2 (int row_num, std::vector < RealD > &ALPHA,
|
||||
std::vector < RealD > &BETA,
|
||||
std::vector < RealD > &eig)
|
||||
{
|
||||
int i, j;
|
||||
std::vector < RealD > evec1 (row_num + 3);
|
||||
std::vector < RealD > evec2 (row_num + 3);
|
||||
RealD eps2;
|
||||
ALPHA[1] = 0.;
|
||||
BETHA[1] = 0.;
|
||||
for (i = 0; i < row_num - 1; i++)
|
||||
{
|
||||
ALPHA[i + 1] = A[i * (row_num + 1)].real ();
|
||||
BETHA[i + 2] = A[i * (row_num + 1) + 1].real ();
|
||||
}
|
||||
ALPHA[row_num] = A[(row_num - 1) * (row_num + 1)].real ();
|
||||
bisec (ALPHA, BETHA, row_num, 1, row_num, 1e-10, 1e-10, evec1, eps2);
|
||||
bisec (ALPHA, BETHA, row_num, 1, row_num, 1e-16, 1e-16, evec2, eps2);
|
||||
|
||||
// Do we really need to sort here?
|
||||
int begin = 1;
|
||||
int end = row_num;
|
||||
int swapped = 1;
|
||||
while (swapped)
|
||||
{
|
||||
swapped = 0;
|
||||
for (i = begin; i < end; i++)
|
||||
{
|
||||
if (mag (evec2[i]) > mag (evec2[i + 1]))
|
||||
{
|
||||
swap (evec2 + i, evec2 + i + 1);
|
||||
swapped = 1;
|
||||
}
|
||||
}
|
||||
end--;
|
||||
for (i = end - 1; i >= begin; i--)
|
||||
{
|
||||
if (mag (evec2[i]) > mag (evec2[i + 1]))
|
||||
{
|
||||
swap (evec2 + i, evec2 + i + 1);
|
||||
swapped = 1;
|
||||
}
|
||||
}
|
||||
begin++;
|
||||
}
|
||||
|
||||
for (i = 0; i < row_num; i++)
|
||||
{
|
||||
for (j = 0; j < row_num; j++)
|
||||
{
|
||||
if (i == j)
|
||||
H[i * row_num + j] = evec2[i + 1];
|
||||
else
|
||||
H[i * row_num + j] = 0.;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static void bisec (std::vector < RealD > &c,
|
||||
std::vector < RealD > &b,
|
||||
int n,
|
||||
int m1,
|
||||
int m2,
|
||||
RealD eps1,
|
||||
RealD relfeh, std::vector < RealD > &x, RealD & eps2)
|
||||
{
|
||||
std::vector < RealD > wu (n + 2);
|
||||
|
||||
RealD h, q, x1, xu, x0, xmin, xmax;
|
||||
int i, a, k;
|
||||
|
||||
b[1] = 0.0;
|
||||
xmin = c[n] - fabs (b[n]);
|
||||
xmax = c[n] + fabs (b[n]);
|
||||
for (i = 1; i < n; i++)
|
||||
{
|
||||
h = fabs (b[i]) + fabs (b[i + 1]);
|
||||
if (c[i] + h > xmax)
|
||||
xmax = c[i] + h;
|
||||
if (c[i] - h < xmin)
|
||||
xmin = c[i] - h;
|
||||
}
|
||||
xmax *= 2.;
|
||||
|
||||
eps2 = relfeh * ((xmin + xmax) > 0.0 ? xmax : -xmin);
|
||||
if (eps1 <= 0.0)
|
||||
eps1 = eps2;
|
||||
eps2 = 0.5 * eps1 + 7.0 * (eps2);
|
||||
x0 = xmax;
|
||||
for (i = m1; i <= m2; i++)
|
||||
{
|
||||
x[i] = xmax;
|
||||
wu[i] = xmin;
|
||||
}
|
||||
|
||||
for (k = m2; k >= m1; k--)
|
||||
{
|
||||
xu = xmin;
|
||||
i = k;
|
||||
do
|
||||
{
|
||||
if (xu < wu[i])
|
||||
{
|
||||
xu = wu[i];
|
||||
i = m1 - 1;
|
||||
}
|
||||
i--;
|
||||
}
|
||||
while (i >= m1);
|
||||
if (x0 > x[k])
|
||||
x0 = x[k];
|
||||
while ((x0 - xu) > 2 * relfeh * (fabs (xu) + fabs (x0)) + eps1)
|
||||
{
|
||||
x1 = (xu + x0) / 2;
|
||||
|
||||
a = 0;
|
||||
q = 1.0;
|
||||
for (i = 1; i <= n; i++)
|
||||
{
|
||||
q =
|
||||
c[i] - x1 -
|
||||
((q != 0.0) ? b[i] * b[i] / q : fabs (b[i]) / relfeh);
|
||||
if (q < 0)
|
||||
a++;
|
||||
}
|
||||
// printf("x1=%0.14e a=%d\n",x1,a);
|
||||
if (a < k)
|
||||
{
|
||||
if (a < m1)
|
||||
{
|
||||
xu = x1;
|
||||
wu[m1] = x1;
|
||||
}
|
||||
else
|
||||
{
|
||||
xu = x1;
|
||||
wu[a + 1] = x1;
|
||||
if (x[a] > x1)
|
||||
x[a] = x1;
|
||||
}
|
||||
}
|
||||
else
|
||||
x0 = x1;
|
||||
}
|
||||
printf ("x0=%0.14e xu=%0.14e k=%d\n", x0, xu, k);
|
||||
x[k] = (x0 + xu) / 2;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
// Implicitly restarted lanczos
|
||||
/////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
template < class Field > class SimpleLanczos
|
||||
{
|
||||
|
||||
const RealD small = 1.0e-16;
|
||||
public:
|
||||
int lock;
|
||||
int get;
|
||||
int Niter;
|
||||
int converged;
|
||||
|
||||
int Nstop; // Number of evecs checked for convergence
|
||||
int Nk; // Number of converged sought
|
||||
int Np; // Np -- Number of spare vecs in kryloc space
|
||||
int Nm; // Nm -- total number of vectors
|
||||
|
||||
|
||||
RealD OrthoTime;
|
||||
|
||||
RealD eresid;
|
||||
|
||||
// SortEigen < Field > _sort;
|
||||
|
||||
LinearFunction < Field > &_Linop;
|
||||
|
||||
// OperatorFunction < Field > &_poly;
|
||||
|
||||
/////////////////////////
|
||||
// Constructor
|
||||
/////////////////////////
|
||||
void init (void)
|
||||
{
|
||||
};
|
||||
// void Abort (int ff, std::vector < RealD > &evals, DenseVector < Denstd::vector < RealD > >&evecs);
|
||||
|
||||
SimpleLanczos (LinearFunction < Field > &Linop, // op
|
||||
// OperatorFunction < Field > &poly, // polynmial
|
||||
int _Nstop, // sought vecs
|
||||
int _Nk, // sought vecs
|
||||
int _Nm, // spare vecs
|
||||
RealD _eresid, // resid in lmdue deficit
|
||||
int _Niter): // Max iterations
|
||||
|
||||
_Linop (Linop),
|
||||
// _poly (poly),
|
||||
Nstop (_Nstop), Nk (_Nk), Nm (_Nm), eresid (_eresid), Niter (_Niter)
|
||||
{
|
||||
Np = Nm - Nk;
|
||||
assert (Np > 0);
|
||||
};
|
||||
|
||||
/////////////////////////
|
||||
// Sanity checked this routine (step) against Saad.
|
||||
/////////////////////////
|
||||
void RitzMatrix (std::vector < Field > &evec, int k)
|
||||
{
|
||||
|
||||
if (1)
|
||||
return;
|
||||
|
||||
GridBase *grid = evec[0].Grid();
|
||||
Field w (grid);
|
||||
std::cout << GridLogMessage << "RitzMatrix " << std::endl;
|
||||
for (int i = 0; i < k; i++)
|
||||
{
|
||||
_Linop(evec[i], w);
|
||||
// _poly(_Linop,evec[i],w);
|
||||
std::cout << GridLogMessage << "[" << i << "] ";
|
||||
for (int j = 0; j < k; j++)
|
||||
{
|
||||
ComplexD in = innerProduct (evec[j], w);
|
||||
if (fabs ((double) i - j) > 1)
|
||||
{
|
||||
if (abs (in) > 1.0e-9)
|
||||
{
|
||||
std::cout << GridLogMessage << "oops" << std::endl;
|
||||
abort ();
|
||||
}
|
||||
else
|
||||
std::cout << GridLogMessage << " 0 ";
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout << GridLogMessage << " " << in << " ";
|
||||
}
|
||||
}
|
||||
std::cout << GridLogMessage << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
void step (std::vector < RealD > &lmd,
|
||||
std::vector < RealD > &lme,
|
||||
Field & last, Field & current, Field & next, uint64_t k)
|
||||
{
|
||||
if (lmd.size () <= k)
|
||||
lmd.resize (k + Nm);
|
||||
if (lme.size () <= k)
|
||||
lme.resize (k + Nm);
|
||||
|
||||
|
||||
// _poly(_Linop,current,next ); // 3. wk:=Avk−βkv_{k−1}
|
||||
_Linop(current, next); // 3. wk:=Avk−βkv_{k−1}
|
||||
if (k > 0)
|
||||
{
|
||||
next -= lme[k - 1] * last;
|
||||
}
|
||||
// std::cout<<GridLogMessage << "<last|next>" << innerProduct(last,next) <<std::endl;
|
||||
|
||||
ComplexD zalph = innerProduct (current, next); // 4. αk:=(wk,vk)
|
||||
RealD alph = real (zalph);
|
||||
|
||||
next = next - alph * current; // 5. wk:=wk−αkvk
|
||||
// std::cout<<GridLogMessage << "<current|next>" << innerProduct(current,next) <<std::endl;
|
||||
|
||||
RealD beta = normalise (next); // 6. βk+1 := ∥wk∥2. If βk+1 = 0 then Stop
|
||||
// 7. vk+1 := wk/βk+1
|
||||
// norm=beta;
|
||||
|
||||
int interval = Nm / 100 + 1;
|
||||
if ((k % interval) == 0)
|
||||
std::
|
||||
cout << GridLogMessage << k << " : alpha = " << zalph << " beta " <<
|
||||
beta << std::endl;
|
||||
const RealD tiny = 1.0e-20;
|
||||
if (beta < tiny)
|
||||
{
|
||||
std::cout << GridLogMessage << " beta is tiny " << beta << std::
|
||||
endl;
|
||||
}
|
||||
lmd[k] = alph;
|
||||
lme[k] = beta;
|
||||
|
||||
}
|
||||
|
||||
void qr_decomp (std::vector < RealD > &lmd,
|
||||
std::vector < RealD > &lme,
|
||||
int Nk,
|
||||
int Nm,
|
||||
std::vector < RealD > &Qt, RealD Dsh, int kmin, int kmax)
|
||||
{
|
||||
int k = kmin - 1;
|
||||
RealD x;
|
||||
|
||||
RealD Fden = 1.0 / hypot (lmd[k] - Dsh, lme[k]);
|
||||
RealD c = (lmd[k] - Dsh) * Fden;
|
||||
RealD s = -lme[k] * Fden;
|
||||
|
||||
RealD tmpa1 = lmd[k];
|
||||
RealD tmpa2 = lmd[k + 1];
|
||||
RealD tmpb = lme[k];
|
||||
|
||||
lmd[k] = c * c * tmpa1 + s * s * tmpa2 - 2.0 * c * s * tmpb;
|
||||
lmd[k + 1] = s * s * tmpa1 + c * c * tmpa2 + 2.0 * c * s * tmpb;
|
||||
lme[k] = c * s * (tmpa1 - tmpa2) + (c * c - s * s) * tmpb;
|
||||
x = -s * lme[k + 1];
|
||||
lme[k + 1] = c * lme[k + 1];
|
||||
|
||||
for (int i = 0; i < Nk; ++i)
|
||||
{
|
||||
RealD Qtmp1 = Qt[i + Nm * k];
|
||||
RealD Qtmp2 = Qt[i + Nm * (k + 1)];
|
||||
Qt[i + Nm * k] = c * Qtmp1 - s * Qtmp2;
|
||||
Qt[i + Nm * (k + 1)] = s * Qtmp1 + c * Qtmp2;
|
||||
}
|
||||
|
||||
// Givens transformations
|
||||
for (int k = kmin; k < kmax - 1; ++k)
|
||||
{
|
||||
|
||||
RealD Fden = 1.0 / hypot (x, lme[k - 1]);
|
||||
RealD c = lme[k - 1] * Fden;
|
||||
RealD s = -x * Fden;
|
||||
|
||||
RealD tmpa1 = lmd[k];
|
||||
RealD tmpa2 = lmd[k + 1];
|
||||
RealD tmpb = lme[k];
|
||||
|
||||
lmd[k] = c * c * tmpa1 + s * s * tmpa2 - 2.0 * c * s * tmpb;
|
||||
lmd[k + 1] = s * s * tmpa1 + c * c * tmpa2 + 2.0 * c * s * tmpb;
|
||||
lme[k] = c * s * (tmpa1 - tmpa2) + (c * c - s * s) * tmpb;
|
||||
lme[k - 1] = c * lme[k - 1] - s * x;
|
||||
|
||||
if (k != kmax - 2)
|
||||
{
|
||||
x = -s * lme[k + 1];
|
||||
lme[k + 1] = c * lme[k + 1];
|
||||
}
|
||||
|
||||
for (int i = 0; i < Nk; ++i)
|
||||
{
|
||||
RealD Qtmp1 = Qt[i + Nm * k];
|
||||
RealD Qtmp2 = Qt[i + Nm * (k + 1)];
|
||||
Qt[i + Nm * k] = c * Qtmp1 - s * Qtmp2;
|
||||
Qt[i + Nm * (k + 1)] = s * Qtmp1 + c * Qtmp2;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#if 0
|
||||
#ifdef USE_LAPACK
|
||||
#ifdef USE_MKL
|
||||
#define LAPACK_INT MKL_INT
|
||||
#else
|
||||
#define LAPACK_INT long long
|
||||
#endif
|
||||
void diagonalize_lapack (std::vector < RealD > &lmd, std::vector < RealD > &lme, int N1, // all
|
||||
int N2, // get
|
||||
GridBase * grid)
|
||||
{
|
||||
const int size = Nm;
|
||||
LAPACK_INT NN = N1;
|
||||
double evals_tmp[NN];
|
||||
double DD[NN];
|
||||
double EE[NN];
|
||||
for (int i = 0; i < NN; i++)
|
||||
for (int j = i - 1; j <= i + 1; j++)
|
||||
if (j < NN && j >= 0)
|
||||
{
|
||||
if (i == j)
|
||||
DD[i] = lmd[i];
|
||||
if (i == j)
|
||||
evals_tmp[i] = lmd[i];
|
||||
if (j == (i - 1))
|
||||
EE[j] = lme[j];
|
||||
}
|
||||
LAPACK_INT evals_found;
|
||||
LAPACK_INT lwork =
|
||||
((18 * NN) >
|
||||
(1 + 4 * NN + NN * NN) ? (18 * NN) : (1 + 4 * NN + NN * NN));
|
||||
LAPACK_INT liwork = 3 + NN * 10;
|
||||
LAPACK_INT iwork[liwork];
|
||||
double work[lwork];
|
||||
LAPACK_INT isuppz[2 * NN];
|
||||
char jobz = 'N'; // calculate evals only
|
||||
char range = 'I'; // calculate il-th to iu-th evals
|
||||
// char range = 'A'; // calculate all evals
|
||||
char uplo = 'U'; // refer to upper half of original matrix
|
||||
char compz = 'I'; // Compute eigenvectors of tridiagonal matrix
|
||||
int ifail[NN];
|
||||
LAPACK_INT info;
|
||||
// int total = QMP_get_number_of_nodes();
|
||||
// int node = QMP_get_node_number();
|
||||
// GridBase *grid = evec[0]._grid;
|
||||
int total = grid->_Nprocessors;
|
||||
int node = grid->_processor;
|
||||
int interval = (NN / total) + 1;
|
||||
double vl = 0.0, vu = 0.0;
|
||||
LAPACK_INT il = interval * node + 1, iu = interval * (node + 1);
|
||||
if (iu > NN)
|
||||
iu = NN;
|
||||
double tol = 0.0;
|
||||
if (1)
|
||||
{
|
||||
memset (evals_tmp, 0, sizeof (double) * NN);
|
||||
if (il <= NN)
|
||||
{
|
||||
printf ("total=%d node=%d il=%d iu=%d\n", total, node, il, iu);
|
||||
#ifdef USE_MKL
|
||||
dstegr (&jobz, &range, &NN,
|
||||
#else
|
||||
LAPACK_dstegr (&jobz, &range, &NN,
|
||||
#endif
|
||||
(double *) DD, (double *) EE, &vl, &vu, &il, &iu, // these four are ignored if second parameteris 'A'
|
||||
&tol, // tolerance
|
||||
&evals_found, evals_tmp, (double *) NULL, &NN,
|
||||
isuppz, work, &lwork, iwork, &liwork, &info);
|
||||
for (int i = iu - 1; i >= il - 1; i--)
|
||||
{
|
||||
printf ("node=%d evals_found=%d evals_tmp[%d] = %g\n", node,
|
||||
evals_found, i - (il - 1), evals_tmp[i - (il - 1)]);
|
||||
evals_tmp[i] = evals_tmp[i - (il - 1)];
|
||||
if (il > 1)
|
||||
evals_tmp[i - (il - 1)] = 0.;
|
||||
}
|
||||
}
|
||||
{
|
||||
grid->GlobalSumVector (evals_tmp, NN);
|
||||
}
|
||||
}
|
||||
// cheating a bit. It is better to sort instead of just reversing it, but the document of the routine says evals are sorted in increasing order. qr gives evals in decreasing order.
|
||||
}
|
||||
#undef LAPACK_INT
|
||||
#endif
|
||||
|
||||
|
||||
void diagonalize (std::vector < RealD > &lmd,
|
||||
std::vector < RealD > &lme,
|
||||
int N2, int N1, GridBase * grid)
|
||||
{
|
||||
|
||||
#ifdef USE_LAPACK
|
||||
const int check_lapack = 0; // just use lapack if 0, check against lapack if 1
|
||||
|
||||
if (!check_lapack)
|
||||
return diagonalize_lapack (lmd, lme, N2, N1, grid);
|
||||
|
||||
// diagonalize_lapack(lmd2,lme2,Nm2,Nm,Qt,grid);
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
static RealD normalise (Field & v)
|
||||
{
|
||||
RealD nn = norm2 (v);
|
||||
nn = sqrt (nn);
|
||||
v = v * (1.0 / nn);
|
||||
return nn;
|
||||
}
|
||||
|
||||
void orthogonalize (Field & w, std::vector < Field > &evec, int k)
|
||||
{
|
||||
double t0 = -usecond () / 1e6;
|
||||
typedef typename Field::scalar_type MyComplex;
|
||||
MyComplex ip;
|
||||
|
||||
if (0)
|
||||
{
|
||||
for (int j = 0; j < k; ++j)
|
||||
{
|
||||
normalise (evec[j]);
|
||||
for (int i = 0; i < j; i++)
|
||||
{
|
||||
ip = innerProduct (evec[i], evec[j]); // are the evecs normalised? ; this assumes so.
|
||||
evec[j] = evec[j] - ip * evec[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (int j = 0; j < k; ++j)
|
||||
{
|
||||
ip = innerProduct (evec[j], w); // are the evecs normalised? ; this assumes so.
|
||||
w = w - ip * evec[j];
|
||||
}
|
||||
normalise (w);
|
||||
t0 += usecond () / 1e6;
|
||||
OrthoTime += t0;
|
||||
}
|
||||
|
||||
void setUnit_Qt (int Nm, std::vector < RealD > &Qt)
|
||||
{
|
||||
for (int i = 0; i < Qt.size (); ++i)
|
||||
Qt[i] = 0.0;
|
||||
for (int k = 0; k < Nm; ++k)
|
||||
Qt[k + k * Nm] = 1.0;
|
||||
}
|
||||
|
||||
|
||||
void calc (std::vector < RealD > &eval, const Field & src, int &Nconv)
|
||||
{
|
||||
|
||||
GridBase *grid = src.Grid();
|
||||
// assert(grid == src._grid);
|
||||
|
||||
std::
|
||||
cout << GridLogMessage << " -- Nk = " << Nk << " Np = " << Np << std::
|
||||
endl;
|
||||
std::cout << GridLogMessage << " -- Nm = " << Nm << std::endl;
|
||||
std::cout << GridLogMessage << " -- size of eval = " << eval.
|
||||
size () << std::endl;
|
||||
|
||||
// assert(c.size() && Nm == eval.size());
|
||||
|
||||
std::vector < RealD > lme (Nm);
|
||||
std::vector < RealD > lmd (Nm);
|
||||
|
||||
|
||||
Field current (grid);
|
||||
Field last (grid);
|
||||
Field next (grid);
|
||||
|
||||
Nconv = 0;
|
||||
|
||||
RealD beta_k;
|
||||
|
||||
// Set initial vector
|
||||
// (uniform vector) Why not src??
|
||||
// evec[0] = 1.0;
|
||||
current = src;
|
||||
std::cout << GridLogMessage << "norm2(src)= " << norm2 (src) << std::
|
||||
endl;
|
||||
normalise (current);
|
||||
std::
|
||||
cout << GridLogMessage << "norm2(evec[0])= " << norm2 (current) <<
|
||||
std::endl;
|
||||
|
||||
// Initial Nk steps
|
||||
OrthoTime = 0.;
|
||||
double t0 = usecond () / 1e6;
|
||||
RealD norm; // sqrt norm of last vector
|
||||
|
||||
uint64_t iter = 0;
|
||||
|
||||
bool initted = false;
|
||||
std::vector < RealD > low (Nstop * 10);
|
||||
std::vector < RealD > high (Nstop * 10);
|
||||
RealD cont = 0.;
|
||||
while (1) {
|
||||
cont = 0.;
|
||||
std::vector < RealD > lme2 (Nm);
|
||||
std::vector < RealD > lmd2 (Nm);
|
||||
for (uint64_t k = 0; k < Nm; ++k, iter++) {
|
||||
step (lmd, lme, last, current, next, iter);
|
||||
last = current;
|
||||
current = next;
|
||||
}
|
||||
double t1 = usecond () / 1e6;
|
||||
std::cout << GridLogMessage << "IRL::Initial steps: " << t1 -
|
||||
t0 << "seconds" << std::endl;
|
||||
t0 = t1;
|
||||
std::
|
||||
cout << GridLogMessage << "IRL::Initial steps:OrthoTime " <<
|
||||
OrthoTime << "seconds" << std::endl;
|
||||
|
||||
// getting eigenvalues
|
||||
lmd2.resize (iter + 2);
|
||||
lme2.resize (iter + 2);
|
||||
for (uint64_t k = 0; k < iter; ++k) {
|
||||
lmd2[k + 1] = lmd[k];
|
||||
lme2[k + 2] = lme[k];
|
||||
}
|
||||
t1 = usecond () / 1e6;
|
||||
std::cout << GridLogMessage << "IRL:: copy: " << t1 -
|
||||
t0 << "seconds" << std::endl;
|
||||
t0 = t1;
|
||||
{
|
||||
int total = grid->_Nprocessors;
|
||||
int node = grid->_processor;
|
||||
int interval = (Nstop / total) + 1;
|
||||
int iu = (iter + 1) - (interval * node + 1);
|
||||
int il = (iter + 1) - (interval * (node + 1));
|
||||
std::vector < RealD > eval2 (iter + 3);
|
||||
RealD eps2;
|
||||
Bisection::bisec (lmd2, lme2, iter, il, iu, 1e-16, 1e-10, eval2,
|
||||
eps2);
|
||||
// diagonalize(eval2,lme2,iter,Nk,grid);
|
||||
RealD diff = 0.;
|
||||
for (int i = il; i <= iu; i++) {
|
||||
if (initted)
|
||||
diff =
|
||||
fabs (eval2[i] - high[iu-i]) / (fabs (eval2[i]) +
|
||||
fabs (high[iu-i]));
|
||||
if (initted && (diff > eresid))
|
||||
cont = 1.;
|
||||
if (initted)
|
||||
printf ("eval[%d]=%0.14e %0.14e, %0.14e\n", i, eval2[i],
|
||||
high[iu-i], diff);
|
||||
high[iu-i] = eval2[i];
|
||||
}
|
||||
il = (interval * node + 1);
|
||||
iu = (interval * (node + 1));
|
||||
Bisection::bisec (lmd2, lme2, iter, il, iu, 1e-16, 1e-10, eval2,
|
||||
eps2);
|
||||
for (int i = il; i <= iu; i++) {
|
||||
if (initted)
|
||||
diff =
|
||||
fabs (eval2[i] - low[i]) / (fabs (eval2[i]) +
|
||||
fabs (low[i]));
|
||||
if (initted && (diff > eresid))
|
||||
cont = 1.;
|
||||
if (initted)
|
||||
printf ("eval[%d]=%0.14e %0.14e, %0.14e\n", i, eval2[i],
|
||||
low[i], diff);
|
||||
low[i] = eval2[i];
|
||||
}
|
||||
t1 = usecond () / 1e6;
|
||||
std::cout << GridLogMessage << "IRL:: diagonalize: " << t1 -
|
||||
t0 << "seconds" << std::endl;
|
||||
t0 = t1;
|
||||
}
|
||||
|
||||
for (uint64_t k = 0; k < Nk; ++k) {
|
||||
// eval[k] = eval2[k];
|
||||
}
|
||||
if (initted)
|
||||
{
|
||||
grid->GlobalSumVector (&cont, 1);
|
||||
if (cont < 1.) return;
|
||||
}
|
||||
initted = true;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#if 0
|
||||
|
||||
/**
|
||||
There is some matrix Q such that for any vector y
|
||||
Q.e_1 = y and Q is unitary.
|
||||
**/
|
||||
template < class T >
|
||||
static T orthQ (DenseMatrix < T > &Q, std::vector < T > y)
|
||||
{
|
||||
int N = y.size (); //Matrix Size
|
||||
Fill (Q, 0.0);
|
||||
T tau;
|
||||
for (int i = 0; i < N; i++)
|
||||
{
|
||||
Q[i][0] = y[i];
|
||||
}
|
||||
T sig = conj (y[0]) * y[0];
|
||||
T tau0 = fabs (sqrt (sig));
|
||||
|
||||
for (int j = 1; j < N; j++)
|
||||
{
|
||||
sig += conj (y[j]) * y[j];
|
||||
tau = abs (sqrt (sig));
|
||||
|
||||
if (abs (tau0) > 0.0)
|
||||
{
|
||||
|
||||
T gam = conj ((y[j] / tau) / tau0);
|
||||
for (int k = 0; k <= j - 1; k++)
|
||||
{
|
||||
Q[k][j] = -gam * y[k];
|
||||
}
|
||||
Q[j][j] = tau0 / tau;
|
||||
}
|
||||
else
|
||||
{
|
||||
Q[j - 1][j] = 1.0;
|
||||
}
|
||||
tau0 = tau;
|
||||
}
|
||||
return tau;
|
||||
}
|
||||
|
||||
/**
|
||||
There is some matrix Q such that for any vector y
|
||||
Q.e_k = y and Q is unitary.
|
||||
**/
|
||||
template < class T >
|
||||
static T orthU (DenseMatrix < T > &Q, std::vector < T > y)
|
||||
{
|
||||
T tau = orthQ (Q, y);
|
||||
SL (Q);
|
||||
return tau;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
Wind up with a matrix with the first con rows untouched
|
||||
|
||||
say con = 2
|
||||
Q is such that Qdag H Q has {x, x, val, 0, 0, 0, 0, ...} as 1st colum
|
||||
and the matrix is upper hessenberg
|
||||
and with f and Q appropriately modidied with Q is the arnoldi factorization
|
||||
|
||||
**/
|
||||
|
||||
template < class T > static void Lock (DenseMatrix < T > &H, ///Hess mtx
|
||||
DenseMatrix < T > &Q, ///Lock Transform
|
||||
T val, ///value to be locked
|
||||
int con, ///number already locked
|
||||
RealD small, int dfg, bool herm)
|
||||
{
|
||||
//ForceTridiagonal(H);
|
||||
|
||||
int M = H.dim;
|
||||
DenseVector < T > vec;
|
||||
Resize (vec, M - con);
|
||||
|
||||
DenseMatrix < T > AH;
|
||||
Resize (AH, M - con, M - con);
|
||||
AH = GetSubMtx (H, con, M, con, M);
|
||||
|
||||
DenseMatrix < T > QQ;
|
||||
Resize (QQ, M - con, M - con);
|
||||
|
||||
Unity (Q);
|
||||
Unity (QQ);
|
||||
|
||||
DenseVector < T > evals;
|
||||
Resize (evals, M - con);
|
||||
DenseMatrix < T > evecs;
|
||||
Resize (evecs, M - con, M - con);
|
||||
|
||||
Wilkinson < T > (AH, evals, evecs, small);
|
||||
|
||||
int k = 0;
|
||||
RealD cold = abs (val - evals[k]);
|
||||
for (int i = 1; i < M - con; i++)
|
||||
{
|
||||
RealD cnew = abs (val - evals[i]);
|
||||
if (cnew < cold)
|
||||
{
|
||||
k = i;
|
||||
cold = cnew;
|
||||
}
|
||||
}
|
||||
vec = evecs[k];
|
||||
|
||||
ComplexD tau;
|
||||
orthQ (QQ, vec);
|
||||
//orthQM(QQ,AH,vec);
|
||||
|
||||
AH = Hermitian (QQ) * AH;
|
||||
AH = AH * QQ;
|
||||
|
||||
for (int i = con; i < M; i++)
|
||||
{
|
||||
for (int j = con; j < M; j++)
|
||||
{
|
||||
Q[i][j] = QQ[i - con][j - con];
|
||||
H[i][j] = AH[i - con][j - con];
|
||||
}
|
||||
}
|
||||
|
||||
for (int j = M - 1; j > con + 2; j--)
|
||||
{
|
||||
|
||||
DenseMatrix < T > U;
|
||||
Resize (U, j - 1 - con, j - 1 - con);
|
||||
DenseVector < T > z;
|
||||
Resize (z, j - 1 - con);
|
||||
T nm = norm (z);
|
||||
for (int k = con + 0; k < j - 1; k++)
|
||||
{
|
||||
z[k - con] = conj (H (j, k + 1));
|
||||
}
|
||||
normalise (z);
|
||||
|
||||
RealD tmp = 0;
|
||||
for (int i = 0; i < z.size () - 1; i++)
|
||||
{
|
||||
tmp = tmp + abs (z[i]);
|
||||
}
|
||||
|
||||
if (tmp < small / ((RealD) z.size () - 1.0))
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
tau = orthU (U, z);
|
||||
|
||||
DenseMatrix < T > Hb;
|
||||
Resize (Hb, j - 1 - con, M);
|
||||
|
||||
for (int a = 0; a < M; a++)
|
||||
{
|
||||
for (int b = 0; b < j - 1 - con; b++)
|
||||
{
|
||||
T sum = 0;
|
||||
for (int c = 0; c < j - 1 - con; c++)
|
||||
{
|
||||
sum += H[a][con + 1 + c] * U[c][b];
|
||||
} //sum += H(a,con+1+c)*U(c,b);}
|
||||
Hb[b][a] = sum;
|
||||
}
|
||||
}
|
||||
|
||||
for (int k = con + 1; k < j; k++)
|
||||
{
|
||||
for (int l = 0; l < M; l++)
|
||||
{
|
||||
H[l][k] = Hb[k - 1 - con][l];
|
||||
}
|
||||
} //H(Hb[k-1-con][l] , l,k);}}
|
||||
|
||||
DenseMatrix < T > Qb;
|
||||
Resize (Qb, M, M);
|
||||
|
||||
for (int a = 0; a < M; a++)
|
||||
{
|
||||
for (int b = 0; b < j - 1 - con; b++)
|
||||
{
|
||||
T sum = 0;
|
||||
for (int c = 0; c < j - 1 - con; c++)
|
||||
{
|
||||
sum += Q[a][con + 1 + c] * U[c][b];
|
||||
} //sum += Q(a,con+1+c)*U(c,b);}
|
||||
Qb[b][a] = sum;
|
||||
}
|
||||
}
|
||||
|
||||
for (int k = con + 1; k < j; k++)
|
||||
{
|
||||
for (int l = 0; l < M; l++)
|
||||
{
|
||||
Q[l][k] = Qb[k - 1 - con][l];
|
||||
}
|
||||
} //Q(Qb[k-1-con][l] , l,k);}}
|
||||
|
||||
DenseMatrix < T > Hc;
|
||||
Resize (Hc, M, M);
|
||||
|
||||
for (int a = 0; a < j - 1 - con; a++)
|
||||
{
|
||||
for (int b = 0; b < M; b++)
|
||||
{
|
||||
T sum = 0;
|
||||
for (int c = 0; c < j - 1 - con; c++)
|
||||
{
|
||||
sum += conj (U[c][a]) * H[con + 1 + c][b];
|
||||
} //sum += conj( U(c,a) )*H(con+1+c,b);}
|
||||
Hc[b][a] = sum;
|
||||
}
|
||||
}
|
||||
|
||||
for (int k = 0; k < M; k++)
|
||||
{
|
||||
for (int l = con + 1; l < j; l++)
|
||||
{
|
||||
H[l][k] = Hc[k][l - 1 - con];
|
||||
}
|
||||
} //H(Hc[k][l-1-con] , l,k);}}
|
||||
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
#endif
|
||||
@@ -260,32 +260,39 @@ CartesianCommunicator::~CartesianCommunicator()
}
#ifdef USE_GRID_REDUCTION
void CartesianCommunicator::GlobalSum(float &f){
FlightRecorder::StepLog("GlobalSumP2P");
CartesianCommunicator::GlobalSumP2P(f);
}
void CartesianCommunicator::GlobalSum(double &d)
{
FlightRecorder::StepLog("GlobalSumP2P");
CartesianCommunicator::GlobalSumP2P(d);
}
#else
void CartesianCommunicator::GlobalSum(float &f){
FlightRecorder::StepLog("AllReduce");
int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(double &d)
{
FlightRecorder::StepLog("AllReduce");
int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_SUM,communicator);
assert(ierr==0);
}
#endif
void CartesianCommunicator::GlobalSum(uint32_t &u){
FlightRecorder::StepLog("AllReduce");
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(uint64_t &u){
FlightRecorder::StepLog("AllReduce");
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSumVector(uint64_t* u,int N){
FlightRecorder::StepLog("AllReduceVector");
int ierr=MPI_Allreduce(MPI_IN_PLACE,u,N,MPI_UINT64_T,MPI_SUM,communicator);
assert(ierr==0);
}
@@ -794,6 +801,7 @@ void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsReque

void CartesianCommunicator::StencilBarrier(void)
{
FlightRecorder::StepLog("NodeBarrier");
MPI_Barrier (ShmComm);
}
//void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
@@ -801,11 +809,13 @@ void CartesianCommunicator::StencilBarrier(void)
//}
void CartesianCommunicator::Barrier(void)
{
FlightRecorder::StepLog("GridBarrier");
int ierr = MPI_Barrier(communicator);
assert(ierr==0);
}
void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
{
FlightRecorder::StepLog("Broadcast");
int ierr=MPI_Bcast(data,
bytes,
MPI_BYTE,
@@ -824,6 +834,7 @@ void CartesianCommunicator::BarrierWorld(void){
}
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
{
FlightRecorder::StepLog("BroadcastWorld");
int ierr= MPI_Bcast(data,
bytes,
MPI_BYTE,
@@ -846,6 +857,7 @@ void CartesianCommunicator::AllToAll(int dim,void *in,void *out,uint64_t words,
}
void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t bytes)
{
FlightRecorder::StepLog("AllToAll");
// MPI is a pain and uses "int" arguments
// 64*64*64*128*16 == 500Million elements of data.
// When 24*4 bytes multiples get 50x 10^9 >>> 2x10^9 Y2K bug.

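The comment in the hunk above notes why large transfers need care: MPI count arguments are plain `int`, so a volume such as 64x64x64x128x16 words overflows 2^31 once multiplied up. A minimal sketch of the chunking idea that comment alludes to; the helper name `ChunkedGlobalSum` is hypothetical and this is not Grid's actual implementation:

```cpp
#include <mpi.h>
#include <algorithm>
#include <cassert>
#include <cstdint>

// Hypothetical helper (not Grid code): reduce a large buffer in pieces so that
// every MPI_Allreduce count argument fits in a signed 32-bit int.
void ChunkedGlobalSum(double *data, uint64_t N, MPI_Comm comm)
{
  const uint64_t chunk = 1024ULL*1024ULL*1024ULL;   // 2^30 elements per call, safely below INT_MAX
  for (uint64_t start = 0; start < N; start += chunk) {
    int count = (int) std::min(chunk, N - start);   // per-call count is always representable as int
    int ierr = MPI_Allreduce(MPI_IN_PLACE, data + start, count,
                             MPI_DOUBLE, MPI_SUM, comm);
    assert(ierr == 0);
  }
}
```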
@@ -990,7 +990,7 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
}
#endif

SharedMemoryTest();
// SharedMemoryTest();
}
//////////////////////////////////////////////////////////////////
// On node barrier

@@ -122,10 +122,10 @@ void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
{
acceleratorMemSet(dest,0,bytes);
}
void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes)
{
acceleratorCopyToDevice(src,dest,bytes);
}
//void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes)
//{
// acceleratorCopyToDevice(src,dest,bytes);
//}
////////////////////////////////////////////////////////
// Global shared functionality finished
// Now move to per communicator functionality

@@ -236,7 +236,7 @@ public:
template<class sobj> inline Lattice<vobj> & operator = (const sobj & r){
vobj vtmp;
vtmp = r;
#if 0
#if 1
deviceVector<vobj> vvtmp(1);
acceleratorPut(vvtmp[0],vtmp);
vobj *vvtmp_p = & vvtmp[0];

@@ -252,6 +252,11 @@ void WilsonFlow<Gimpl>::smear(GaugeField& out, const GaugeField& in) const{

out = in;
RealD taus = 0.;

// Perform initial t=0 measurements
for(auto const &meas : this->functions)
meas.second(0,taus,out);

for (unsigned int step = 1; step <= Nstep; step++) { //step indicates the number of smearing steps applied at the time of measurement
auto start = std::chrono::high_resolution_clock::now();
evolve_step(out, taus);
@@ -336,6 +341,11 @@ void WilsonFlowAdaptive<Gimpl>::smear(GaugeField& out, const GaugeField& in) con
RealD taus = 0.;
RealD eps = init_epsilon;
unsigned int step = 0;

// Perform initial t=0 measurements
for(auto const &meas : this->functions)
meas.second(step,taus,out);

do{
int step_success = evolve_step_adaptive(out, taus, eps);
step += step_success; //step will not be incremented if the integration step fails

@@ -396,6 +396,7 @@ public:
Packets[i].from_rank,Packets[i].do_recv,
Packets[i].xbytes,Packets[i].rbytes,i);
}
FlightRecorder::StepLog("Communicate begin has finished");
// Get comms started then run checksums
// Having this PRIOR to the dslash seems to make Sunspot work... (!)
for(int i=0;i<Packets.size();i++){

@@ -251,7 +251,7 @@ inline void acceleratorCopyToDevice(const void *from,void *to,size_t bytes) { c
inline void acceleratorCopyFromDevice(const void *from,void *to,size_t bytes){ cudaMemcpy(to,from,bytes, cudaMemcpyDeviceToHost);}
inline void acceleratorMemSet(void *base,int value,size_t bytes) { cudaMemset(base,value,bytes);}
inline acceleratorEvent_t acceleratorCopyToDeviceAsynch(void *from, void *to, size_t bytes, cudaStream_t stream = copyStream) {
acceleratorCopyToDevice(to,from,bytes, cudaMemcpyHostToDevice);
acceleratorCopyToDevice(from,to,bytes);
return 0;
}
inline acceleratorEvent_t acceleratorCopyFromDeviceAsynch(void *from, void *to, size_t bytes, cudaStream_t stream = copyStream) {
@@ -337,7 +337,7 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) {
cgh.parallel_for( \
sycl::nd_range<3>(global,local), \
[=] (sycl::nd_item<3> item) /*mutable*/ \
[[intel::reqd_sub_group_size(16)]] \
[[sycl::reqd_sub_group_size(16)]] \
{ \
auto iter1 = item.get_global_id(0); \
auto iter2 = item.get_global_id(1); \

@@ -28,11 +28,6 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
/* END LEGAL */
#pragma once

#ifndef MIN
#define MIN(x,y) ((x)>(y)?(y):(x))
#endif


// Introduce a class to gain deterministic bit reproducible reduction.
// make static; perhaps just a namespace is required.
NAMESPACE_BEGIN(Grid);

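The comment in the hunk above sketches the intent: sum in a fixed, thread-count-independent order so a global reduction is bit reproducible from run to run. A minimal illustration of that idea only, assuming nothing about the class Grid eventually introduces; the `DeterministicReduce` namespace here is hypothetical:

```cpp
#include <cstddef>
#include <vector>

// Hypothetical sketch (not Grid's class): a fixed pairwise tree summation order
// gives the same bit pattern for identical input data, unlike an OpenMP
// reduction whose combination order depends on the thread count.
namespace DeterministicReduce {
  inline double sum(const std::vector<double> &in) {
    std::vector<double> tmp(in);              // scratch copy; combination order is fixed by index
    std::size_t n = tmp.size();
    while (n > 1) {
      std::size_t half = (n + 1) / 2;
      for (std::size_t i = 0; i < n / 2; i++) tmp[i] = tmp[2*i] + tmp[2*i+1];
      if (n & 1) tmp[n/2] = tmp[n-1];         // odd element carried up unchanged
      n = half;
    }
    return n ? tmp[0] : 0.0;
  }
}
```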
@@ -638,12 +638,11 @@ void Grid_debug_handler_init(void)
sa.sa_flags = SA_SIGINFO;
// sigaction(SIGSEGV,&sa,NULL);
sigaction(SIGTRAP,&sa,NULL);
sigaction(SIGBUS,&sa,NULL);
// sigaction(SIGBUS,&sa,NULL);
// sigaction(SIGUSR2,&sa,NULL);

feenableexcept( FE_INVALID|FE_OVERFLOW|FE_DIVBYZERO);

sigaction(SIGFPE,&sa,NULL);
// feenableexcept( FE_INVALID|FE_OVERFLOW|FE_DIVBYZERO);
// sigaction(SIGFPE,&sa,NULL);
sigaction(SIGKILL,&sa,NULL);
sigaction(SIGILL,&sa,NULL);

@@ -66,6 +66,7 @@ namespace Grid{
};
}


template <class T> void writeFile(T& in, std::string const fname){
#ifdef HAVE_LIME
// Ref: https://github.com/paboyle/Grid/blob/feature/scidac-wp1/tests/debug/Test_general_coarse_hdcg_phys48.cc#L111
@@ -73,7 +74,7 @@ template <class T> void writeFile(T& in, std::string const fname){
Grid::emptyUserRecord record;
Grid::ScidacWriter WR(in.Grid()->IsBoss());
WR.open(fname);
WR.writeScidacFieldRecord(in,record,0);
WR.writeScidacFieldRecord(in,record,0); // Lexico
WR.close();
#endif
// What is the appropriate way to throw error?
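On the open question in the comment above ("What is the appropriate way to throw error?"), one conventional answer is to fail loudly in an `#else` branch when LIME support is not configured, instead of silently writing nothing. A sketch only, reusing the calls already shown in the hunk; the `#else` error path is this editor's assumption, not necessarily what the repository adopts:

```cpp
#include <cassert>
#include <iostream>
#include <string>

template <class T> void writeFile(T& in, std::string const fname){
#ifdef HAVE_LIME
  Grid::emptyUserRecord record;
  Grid::ScidacWriter WR(in.Grid()->IsBoss());
  WR.open(fname);
  WR.writeScidacFieldRecord(in,record,0); // Lexico
  WR.close();
#else
  // Sketch of an error path: abort with a message rather than skipping the write silently.
  std::cerr << "writeFile(" << fname << "): Grid was built without LIME, cannot write SciDAC output" << std::endl;
  assert(0);
#endif
}
```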
@@ -107,8 +108,18 @@ int main(int argc, char **argv) {
|
||||
|
||||
for (int conf = CPar.StartConfiguration; conf <= CPar.EndConfiguration; conf+= CPar.Skip){
|
||||
|
||||
#if 0
|
||||
CPNersc.CheckpointRestore(conf, Umu, sRNG, pRNG);
|
||||
#else
|
||||
// Don't require Grid format RNGs
|
||||
FieldMetaData header;
|
||||
std::string file, filesmr;
|
||||
file = CPar.conf_path + "/" + CPar.conf_prefix + "." + std::to_string(conf);
|
||||
filesmr = CPar.conf_path + "/" + CPar.conf_smr_prefix + "." + std::to_string(conf);
|
||||
|
||||
NerscIO::readConfiguration(Umu,header,file);
|
||||
#endif
|
||||
|
||||
std::cout << std::setprecision(15);
|
||||
std::cout << GridLogMessage << "Initial plaquette: "<< WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu) << std::endl;
|
||||
|
||||
@@ -116,6 +127,7 @@ int main(int argc, char **argv) {
|
||||
std::string file_post = CPar.conf_prefix + "." + std::to_string(conf);
|
||||
|
||||
WilsonFlow<PeriodicGimplR> WF(WFPar.step_size,WFPar.steps,WFPar.meas_interval);
|
||||
|
||||
WF.addMeasurement(WFPar.meas_interval_density, [&file_pre,&file_post,&conf](int step, RealD t, const typename PeriodicGimplR::GaugeField &U){
|
||||
|
||||
typedef typename PeriodicGimplR::GaugeLinkField GaugeMat;
|
||||
@@ -165,33 +177,48 @@ int main(int argc, char **argv) {
|
||||
//double coeff = 2.0 / (1.0 * Nd * (Nd - 1)) / 3.0;
|
||||
//Plq = coeff * Plq;
|
||||
|
||||
int tau = std::round(t);
|
||||
std::string efile = file_pre + "E_dnsty_" + std::to_string(tau) + "_" + file_post;
|
||||
writeFile(R,efile);
|
||||
std::string tfile = file_pre + "Top_dnsty_" + std::to_string(tau) + "_" + file_post;
|
||||
writeFile(qfield,tfile);
|
||||
|
||||
RealD WFlow_TC5Li = WilsonLoops<PeriodicGimplR>::TopologicalCharge5Li(U);
|
||||
|
||||
int tau = std::round(t);
|
||||
|
||||
std::string efile = file_pre + "E_dnsty_" + std::to_string(tau) + "_" + file_post;
|
||||
// writeFile(R,efile);
|
||||
|
||||
std::string tfile = file_pre + "Top_dnsty_" + std::to_string(tau) + "_" + file_post;
|
||||
// writeFile(qfield,tfile);
|
||||
|
||||
std::string ufile = file_pre + "U_" + std::to_string(tau) + "_" + file_post;
|
||||
{
|
||||
// PeriodicGimplR::GaugeField Ucopy = U;
|
||||
// NerscIO::writeConfiguration(Ucopy,ufile);
|
||||
}
|
||||
|
||||
RealD E = real(sum(R))/ RealD(U.Grid()->gSites());
|
||||
RealD T = real( sum(qfield) );
|
||||
Coordinate scoor; for (int mu=0; mu < Nd; mu++) scoor[mu] = 0;
|
||||
RealD E0 = real(peekSite(R,scoor));
|
||||
RealD T0 = real(peekSite(qfield,scoor));
|
||||
std::cout << GridLogMessage << "[WilsonFlow] Saved energy density (clover) & topo. charge density: " << conf << " " << step << " " << tau << " "
|
||||
<< "(E_avg,T_sum) " << E << " " << T << " (E, T at origin) " << E0 << " " << T0 << std::endl;
|
||||
<< "(E_avg,T_sum) " << E << " " << T << " (E, T at origin) " << E0 << " " << T0 << " Q5Li "<< WFlow_TC5Li << std::endl;
|
||||
|
||||
});
|
||||
|
||||
int t=WFPar.maxTau;
|
||||
WF.smear(Uflow, Umu);
|
||||
|
||||
// NerscIO::writeConfiguration(Uflow,filesmr);
|
||||
|
||||
|
||||
RealD WFlow_plaq = WilsonLoops<PeriodicGimplR>::avgPlaquette(Uflow);
|
||||
RealD WFlow_TC = WilsonLoops<PeriodicGimplR>::TopologicalCharge(Uflow);
|
||||
RealD WFlow_TC5Li = WilsonLoops<PeriodicGimplR>::TopologicalCharge5Li(Uflow);
|
||||
RealD WFlow_T0 = WF.energyDensityPlaquette(t,Uflow); // t
|
||||
RealD WFlow_EC = WF.energyDensityCloverleaf(t,Uflow);
|
||||
std::cout << GridLogMessage << "Plaquette "<< conf << " " << WFlow_plaq << std::endl;
|
||||
std::cout << GridLogMessage << "T0 "<< conf << " " << WFlow_T0 << std::endl;
|
||||
std::cout << GridLogMessage << "TC0 "<< conf << " " << WFlow_EC << std::endl;
|
||||
std::cout << GridLogMessage << "TopologicalCharge "<< conf << " " << WFlow_TC << std::endl;
|
||||
std::cout << GridLogMessage << "Plaquette "<< conf << " " << WFlow_plaq << std::endl;
|
||||
std::cout << GridLogMessage << "T0 "<< conf << " " << WFlow_T0 << std::endl;
|
||||
std::cout << GridLogMessage << "TC0 "<< conf << " " << WFlow_EC << std::endl;
|
||||
std::cout << GridLogMessage << "TopologicalCharge "<< conf << " " << WFlow_TC << std::endl;
|
||||
std::cout << GridLogMessage << "TopologicalCharge5Li "<< conf << " " << WFlow_TC5Li<< std::endl;
|
||||
|
||||
std::cout<< GridLogMessage << " Admissibility check:\n";
|
||||
const double sp_adm = 0.067; // admissible threshold
|
||||
|
||||
@@ -873,7 +873,7 @@ int main (int argc, char ** argv)
int do_su4=0;
int do_memory=1;
int do_comms =1;
int do_blas =0;
int do_blas =1;
int do_dslash=1;

int sel=4;

@@ -7,8 +7,6 @@ CXX=mpicxx ../../configure \
--enable-unified=yes \
--prefix /Users/peterboyle/QCD/vtk/Grid/install \
--with-lime=$CLIME \
--with-hdf5=$HDF5 \
--with-fftw=$FFTW \
--with-openssl=$OPENSSL \
--with-gmp=$GMP \
--with-mpfr=$MPFR \

@@ -1,3 +1,12 @@

spack load c-lime
spack load fftw
spack load hdf5+cxx

export FFTW=`spack find --paths fftw | grep ^fftw | awk '{print $2}' `
export HDF5=`spack find --paths hdf5+cxx | grep ^hdf5 | awk '{print $2}' `
export CLIME=`spack find --paths c-lime | grep ^c-lime | awk '{print $2}' `

../../configure \
--enable-comms=mpi-auto \
--enable-unified=yes \
@@ -5,12 +14,16 @@
--enable-shm-fast-path=shmopen \
--enable-accelerator=none \
--enable-simd=AVX512 \
--disable-accelerator-cshift \
--with-lime=$CLIME \
--with-hdf5=$HDF5 \
--with-fftw=$FFTW \
--disable-fermion-reps \
--disable-gparity \
CXX=clang++ \
MPICXX=mpicxx \
CXXFLAGS="-std=c++17"
LIBS=-llime \
LDFLAGS=-L$CLIME/lib/ \
CXXFLAGS="-std=c++17 -fPIE"

@@ -1,4 +1,5 @@
source $HOME/spack/share/spack/setup-env.sh
spack load llvm@17.0.4
export LD_LIBRARY_PATH=/direct/sdcc+u/paboyle/spack/opt/spack/linux-almalinux8-icelake/gcc-8.5.0/llvm-17.0.4-laufdrcip63ivkadmtgoepwmj3dtztdu/lib:$LD_LIBRARY_PATH
module load openmpi
module load openmpi/4.1.8
spack load c-lime

@@ -1,15 +1,14 @@
<?xml version="1.0"?>
<grid>
<LanczosParameters>
<mass>-1.025</mass>
<mstep>-0.025</mstep>
<mass>0.00107</mass>
<M5>1.8</M5>
<Ls>48</Ls>
<Nstop>10</Nstop>
<Nk>12</Nk>
<Np>30</Np>
<ChebyLow>0.1</ChebyLow>
<ChebyHigh>50</ChebyHigh>
<ChebyOrder>51</ChebyOrder>
<Nk>15</Nk>
<Np>85</Np>
<ChebyLow>0.003</ChebyLow>
<ChebyHigh>60</ChebyHigh>
<ChebyOrder>201</ChebyOrder>
</LanczosParameters>
</grid>

@@ -31,16 +31,23 @@ directory
|
||||
|
||||
using namespace std;
|
||||
using namespace Grid;
|
||||
;
|
||||
|
||||
#if 0
|
||||
//typedef WilsonFermionD FermionOp;
|
||||
typedef DomainWallFermionD FermionOp;
|
||||
typedef typename DomainWallFermionD::FermionField FermionField;
|
||||
#else
|
||||
typedef MobiusFermionD FermionOp;
|
||||
typedef typename MobiusFermionD::FermionField FermionField;
|
||||
#endif
|
||||
|
||||
template <class T> void writeFile(T& in, std::string const fname){
|
||||
#ifdef HAVE_LIME
|
||||
// Ref: https://github.com/paboyle/Grid/blob/feature/scidac-wp1/tests/debug/Test_general_coarse_hdcg_phys48.cc#L111
|
||||
std::cout << Grid::GridLogMessage << "Writes to: " << fname << std::endl;
|
||||
Grid::emptyUserRecord record;
|
||||
Grid::ScidacWriter WR(in.Grid()->IsBoss());
|
||||
WR.open(fname);
|
||||
WR.writeScidacFieldRecord(in,record,0);
|
||||
WR.close();
|
||||
#endif
|
||||
// What is the appropriate way to throw error?
|
||||
}
|
||||
|
||||
RealD AllZero(RealD x) { return 0.; }
|
||||
|
||||
@@ -125,7 +132,7 @@ int main(int argc, char** argv) {
|
||||
|
||||
int Ls=16;
|
||||
RealD M5=1.8;
|
||||
RealD mass = -1.0;
|
||||
RealD mass = 0.01;
|
||||
|
||||
mass=LanParams.mass;
|
||||
Ls=LanParams.Ls;
|
||||
@@ -163,10 +170,10 @@ int main(int argc, char** argv) {
|
||||
U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
|
||||
}
|
||||
*/
|
||||
|
||||
int Nstop = 10;
|
||||
int Nk = 20;
|
||||
int Nstop = Nk;
|
||||
int Np = 80;
|
||||
|
||||
Nstop=LanParams.Nstop;
|
||||
Nk=LanParams.Nk;
|
||||
Np=LanParams.Np;
|
||||
@@ -174,11 +181,10 @@ int main(int argc, char** argv) {
|
||||
int Nm = Nk + Np;
|
||||
int MaxIt = 10000;
|
||||
RealD resid = 1.0e-5;
|
||||
RealD mob_b=1.5;
|
||||
|
||||
|
||||
//while ( mass > - 5.0){
|
||||
// FermionOp Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
|
||||
FermionOp Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,mob_b,mob_b-1.);
|
||||
FermionOp Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
|
||||
MdagMLinearOperator<FermionOp,FermionField> HermOp(Ddwf); /// <-----
|
||||
// Gamma5HermitianLinearOperator <FermionOp,LatticeFermion> HermOp2(WilsonOperator); /// <-----
|
||||
Gamma5R5HermitianLinearOperator<FermionOp, LatticeFermion> G5R5Herm(Ddwf);
|
||||
@@ -206,8 +212,12 @@ int main(int argc, char** argv) {
|
||||
int Nconv;
|
||||
IRL.calc(eval, evec, src, Nconv);
|
||||
|
||||
std::cout << mass <<" : " << eval << std::endl;
|
||||
|
||||
std::cout << mass <<" : " << eval << std::endl;
|
||||
std::cout << " #evecs " << evec.size() << std::endl;
|
||||
std::cout << " Nconv " << Nconv << std::endl;
|
||||
std::cout << " Nm " << Nm << std::endl;
|
||||
if ( Nconv > evec.size() ) Nconv = evec.size();
|
||||
|
||||
#if 0
|
||||
Gamma g5(Gamma::Algebra::Gamma5) ;
|
||||
ComplexD dot;
|
||||
@@ -237,6 +247,7 @@ int main(int argc, char** argv) {
|
||||
vector<LatticeFermion> finalevec(Nconv, FGrid);
|
||||
vector<RealD> eMe(Nconv), eMMe(Nconv);
|
||||
for(int i = 0; i < Nconv; i++){
|
||||
cout << "calculate the matrix element["<<i<<"]" << endl;
|
||||
G5R5Herm.HermOpAndNorm(evec[i], G5R5Mevec[i], eMe[i], eMMe[i]);
|
||||
}
|
||||
cout << "Re<evec, G5R5M(evec)>: " << endl;
|
||||
@@ -303,7 +314,7 @@ int main(int argc, char** argv) {
|
||||
}
|
||||
}
|
||||
}
|
||||
for(int i = 0; i < Nconv; i++){
|
||||
for(int i = 0; i < Nconv; i++){
|
||||
G5R5Herm.HermOpAndNorm(finalevec[i], G5R5Mevec[i], eMe[i], eMMe[i]);
|
||||
}
|
||||
cout << "Re<evec, G5R5M(evec)>: " << endl;
|
||||
@@ -311,6 +322,7 @@ int main(int argc, char** argv) {
|
||||
cout << "<G5R5M(evec), G5R5M(evec)>" << endl;
|
||||
cout << eMMe << endl;
|
||||
|
||||
|
||||
|
||||
// vector<LatticeFermion> finalevec(Nconv, FGrid);
|
||||
// temporary, until doing rotation
|
||||
@@ -331,13 +343,41 @@ int main(int argc, char** argv) {
|
||||
axpby_ssp(G5evec[i], -1., finalevec[i], 0., G5evec[i], j, j);
|
||||
}
|
||||
}
|
||||
|
||||
for(int i = 0; i < Nconv; i++){
|
||||
Ddwf.M(finalevec[i], G5R5Mevec[i]);
|
||||
for(int j = 0; j < Nconv; j++){
|
||||
std::cout << "<"<<j<<"|Ddwf|"<<i<<"> = "<<innerProduct(finalevec[j],G5R5Mevec[i])<<std::endl;
|
||||
}
|
||||
}
|
||||
for(int i = 0; i < Nconv; i++){
|
||||
RealD t1,t2;
|
||||
G5R5Herm.HermOpAndNorm(finalevec[i], G5R5Mevec[i], t1, t2);
|
||||
for(int j = 0; j < Nconv; j++){
|
||||
std::cout << "<"<<j<<"|G5R5 M|"<<i<<"> = "<<innerProduct(finalevec[j],G5R5Mevec[i])<<std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
for(int i = 0; i < Nconv; i++){
|
||||
chiral_matrix_real[i].resize(Nconv);
|
||||
chiral_matrix[i].resize(Nconv);
|
||||
|
||||
std::string evfile("./evec_density");
|
||||
evfile = evfile+"_"+std::to_string(i);
|
||||
auto evdensity = localInnerProduct(finalevec[i],finalevec[i] );
|
||||
writeFile(evdensity,evfile);
|
||||
|
||||
for(int j = 0; j < Nconv; j++){
|
||||
chiral_matrix[i][j] = innerProduct(finalevec[i], G5evec[j]);
|
||||
std::cout <<" chiral_matrix_real signed "<<i<<" "<<j<<" "<< chiral_matrix_real[i][j] << std::endl;
|
||||
chiral_matrix_real[i][j] = abs(chiral_matrix[i][j]);
|
||||
std::cout <<" chiral_matrix_real "<<i<<" "<<j<<" "<< chiral_matrix_real[i][j] << std::endl;
|
||||
if ( chiral_matrix_real[i][j] > 0.8 ) {
|
||||
auto g5density = localInnerProduct(finalevec[i], G5evec[j]);
|
||||
std::string chfile("./chiral_density_");
|
||||
chfile = chfile +std::to_string(i)+"_"+std::to_string(j);
|
||||
writeFile(g5density,chfile);
|
||||
}
|
||||
}
|
||||
}
|
||||
for(int i = 0; i < Nconv; i++){
|
||||
@@ -346,6 +386,43 @@ int main(int argc, char** argv) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
FILE *fp = fopen("lego-plot.py","w"); assert(fp!=NULL);
|
||||
#define PYTHON_LINE(A) fprintf(fp,A"\n");
|
||||
PYTHON_LINE("import matplotlib.pyplot as plt");
|
||||
PYTHON_LINE("import numpy as np");
|
||||
PYTHON_LINE("");
|
||||
PYTHON_LINE("fig = plt.figure()");
|
||||
PYTHON_LINE("ax = fig.add_subplot(projection='3d')");
|
||||
PYTHON_LINE("");
|
||||
PYTHON_LINE("x, y = np.random.rand(2, 100) * 4");
|
||||
fprintf(fp,"hist, xedges, yedges = np.histogram2d(x, y, bins=%d, range=[[0, %d], [0, %d]])\n",Nconv,Nconv-1,Nconv-1);
|
||||
PYTHON_LINE("");
|
||||
PYTHON_LINE("# Construct arrays for the anchor positions of the 16 bars");
|
||||
PYTHON_LINE("xpos, ypos = np.meshgrid(xedges[:-1] + 0.25, yedges[:-1] + 0.25, indexing=\"ij\")");
|
||||
PYTHON_LINE("xpos = xpos.ravel()");
|
||||
PYTHON_LINE("ypos = ypos.ravel()");
|
||||
PYTHON_LINE("zpos = 0");
|
||||
PYTHON_LINE("");
|
||||
PYTHON_LINE("# Construct arrays with the dimensions for the 16 bars.");
|
||||
PYTHON_LINE("dx = dy = 0.5 * np.ones_like(zpos)");
|
||||
PYTHON_LINE("dz = np.array([");
|
||||
for(int i = 0; i < Nconv; i++){
|
||||
fprintf(fp,"\t[ ");
|
||||
for(int j = 0; j < Nconv; j++){
|
||||
fprintf(fp,"%lf ",chiral_matrix_real[i][j]);
|
||||
if(j<Nconv-1) fprintf(fp,",");
|
||||
else fprintf(fp," ");
|
||||
}
|
||||
fprintf(fp,"]");
|
||||
if(i<Nconv-1) fprintf(fp,",\n");
|
||||
else fprintf(fp,"\n");
|
||||
}
|
||||
|
||||
PYTHON_LINE("\t])");
|
||||
PYTHON_LINE("dz = dz.ravel()");
|
||||
PYTHON_LINE("ax.bar3d(xpos, ypos, zpos, dx, dy, dz, zsort='average')");
|
||||
PYTHON_LINE("plt.show()");
|
||||
fclose(fp);
|
||||
|
||||
Grid_finalize();
|
||||
}
|
||||
|
||||
@@ -113,9 +113,6 @@ struct LanczosParameters: Serializable {
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParameters,
|
||||
RealD, mass ,
|
||||
RealD, resid,
|
||||
Integer, Nstop,
|
||||
Integer, Nk,
|
||||
Integer, Np,
|
||||
RealD, ChebyLow,
|
||||
RealD, ChebyHigh,
|
||||
Integer, ChebyOrder)
|
||||
@@ -207,6 +204,7 @@ int main(int argc, char** argv) {
|
||||
int Nstop = 5;
|
||||
int Nk = 10;
|
||||
int Np = 90;
|
||||
int Nm = Nk + Np;
|
||||
int MaxIt = 10000;
|
||||
RealD resid = 1.0e-5;
|
||||
|
||||
@@ -228,14 +226,10 @@ int main(int argc, char** argv) {
|
||||
XmlWriter HMCwr("LanParams.xml.out");
|
||||
write(HMCwr,"LanczosParameters",LanParams);
|
||||
}
|
||||
Nstop=LanParams.Nstop;
|
||||
Nk=LanParams.Nk;
|
||||
Np=LanParams.Np;
|
||||
|
||||
mass=LanParams.mass;
|
||||
resid=LanParams.resid;
|
||||
|
||||
int Nm = Nk + Np;
|
||||
|
||||
|
||||
while ( mass > - 5.0){
|
||||
FermionOp WilsonOperator(Umu,*FGrid,*FrbGrid,2.+mass);
|
||||
|
||||
@@ -61,8 +61,7 @@ int main(int argc, char** argv) {
|
||||
RNG5.SeedFixedIntegers(seeds5);
|
||||
|
||||
LatticeGaugeField Umu(UGrid);
|
||||
// SU<Nc>::HotConfiguration(RNG4, Umu);
|
||||
SU<Nc>::ColdConfiguration(Umu);
|
||||
SU<Nc>::HotConfiguration(RNG4, Umu);
|
||||
|
||||
/*
|
||||
std::vector<LatticeColourMatrix> U(4, UGrid);
|
||||
@@ -70,15 +69,9 @@ int main(int argc, char** argv) {
|
||||
U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
|
||||
}
|
||||
*/
|
||||
// std::vector<Complex> boundary = {1,1,1,-1};
|
||||
std::vector<Complex> boundary = {1,1,1,1};
|
||||
FermionOp::ImplParams Params(boundary);
|
||||
|
||||
|
||||
|
||||
RealD mass = 0.0;
|
||||
// FermionOp WilsonOperator(Umu,*FGrid,*FrbGrid,mass);
|
||||
FermionOp WilsonOperator(Umu,*FGrid,*FrbGrid,mass,Params);
|
||||
RealD mass = -0.1;
|
||||
FermionOp WilsonOperator(Umu,*FGrid,*FrbGrid,mass);
|
||||
MdagMLinearOperator<FermionOp,LatticeFermion> HermOp(WilsonOperator); /// <-----
|
||||
//SchurDiagTwoOperator<FermionOp,FermionField> HermOp(WilsonOperator);
|
||||
|
||||
@@ -96,8 +89,7 @@ int main(int argc, char** argv) {
|
||||
FunctionHermOp<FermionField> OpCheby(Cheby,HermOp);
|
||||
PlainHermOp<FermionField> Op (HermOp);
|
||||
|
||||
// ImplicitlyRestartedLanczos<FermionField> IRL(OpCheby, Op, Nstop, Nk, Nm, resid, MaxIt);
|
||||
SimpleLanczos<FermionField> IRL(Op,Nstop, Nk, Nm, resid, MaxIt);
|
||||
ImplicitlyRestartedLanczos<FermionField> IRL(OpCheby, Op, Nstop, Nk, Nm, resid, MaxIt);
|
||||
|
||||
std::vector<RealD> eval(Nm);
|
||||
FermionField src(FGrid);
|
||||
@@ -109,8 +101,7 @@ int main(int argc, char** argv) {
|
||||
};
|
||||
|
||||
int Nconv;
|
||||
// IRL.calc(eval, evec, src, Nconv);
|
||||
IRL.calc(eval, src, Nconv);
|
||||
IRL.calc(eval, evec, src, Nconv);
|
||||
|
||||
std::cout << eval << std::endl;
|
||||
|
||||
|
||||
@@ -27,7 +27,6 @@ directory
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
#include <Grid/Grid.h>
|
||||
#include <Grid/parallelIO/IldgIOtypes.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace Grid;
|
||||
@@ -39,29 +38,11 @@ typedef typename WilsonFermionD::FermionField FermionField;
|
||||
|
||||
RealD AllZero(RealD x) { return 0.; }
|
||||
|
||||
template <class T> void writeFile(T& in, std::string const fname){
|
||||
#if 1
|
||||
// Ref: https://github.com/paboyle/Grid/blob/feature/scidac-wp1/tests/debug/Test_general_coarse_hdcg_phys48.cc#L111
|
||||
std::cout << Grid::GridLogMessage << "Writes to: " << fname << std::endl;
|
||||
Grid::emptyUserRecord record;
|
||||
Grid::ScidacWriter WR(in.Grid()->IsBoss());
|
||||
WR.open(fname);
|
||||
WR.writeScidacFieldRecord(in,record,0);
|
||||
WR.close();
|
||||
#endif
|
||||
// What is the appropriate way to throw error?
|
||||
}
|
||||
|
||||
|
||||
namespace Grid {
|
||||
|
||||
struct LanczosParameters: Serializable {
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParameters,
|
||||
RealD, mass ,
|
||||
RealD, mstep ,
|
||||
Integer, Nstop,
|
||||
Integer, Nk,
|
||||
Integer, Np,
|
||||
RealD, ChebyLow,
|
||||
RealD, ChebyHigh,
|
||||
Integer, ChebyOrder)
|
||||
@@ -134,7 +115,6 @@ int main(int argc, char** argv) {
|
||||
|
||||
LatticeGaugeField Umu(UGrid);
|
||||
// SU<Nc>::HotConfiguration(RNG4, Umu);
|
||||
// SU<Nc>::ColdConfiguration(Umu);
|
||||
|
||||
FieldMetaData header;
|
||||
std::string file("./config");
|
||||
@@ -178,20 +158,10 @@ int main(int argc, char** argv) {
|
||||
}
|
||||
|
||||
mass=LanParams.mass;
|
||||
Nstop=LanParams.Nstop;
|
||||
Nk=LanParams.Nk;
|
||||
Np=LanParams.Np;
|
||||
Nm = Nk + Np;
|
||||
|
||||
FermionField src(FGrid);
|
||||
gaussian(RNG5, src);
|
||||
std::vector<Complex> boundary = {1,1,1,-1};
|
||||
// std::vector<Complex> boundary = {1,1,1,1};
|
||||
FermionOp::ImplParams Params(boundary);
|
||||
|
||||
|
||||
while ( mass > - 2.5){
|
||||
FermionOp WilsonOperator(Umu,*FGrid,*FrbGrid,mass,Params);
|
||||
while ( mass > - 5.0){
|
||||
FermionOp WilsonOperator(Umu,*FGrid,*FrbGrid,mass);
|
||||
MdagMLinearOperator<FermionOp,FermionField> HermOp(WilsonOperator); /// <-----
|
||||
//SchurDiagTwoOperator<FermionOp,FermionField> HermOp(WilsonOperator);
|
||||
Gamma5HermitianLinearOperator <FermionOp,LatticeFermion> HermOp2(WilsonOperator); /// <-----
|
||||
@@ -210,9 +180,10 @@ while ( mass > - 2.5){
|
||||
PlainHermOp<FermionField> Op2 (HermOp2);
|
||||
|
||||
ImplicitlyRestartedLanczos<FermionField> IRL(OpCheby, Op2, Nstop, Nk, Nm, resid, MaxIt);
|
||||
// SimpleLanczos<FermionField> IRL(Op,Nstop, Nk, Nm, resid, MaxIt);
|
||||
|
||||
std::vector<RealD> eval(Nm);
|
||||
FermionField src(FGrid);
|
||||
gaussian(RNG5, src);
|
||||
std::vector<FermionField> evec(Nm, FGrid);
|
||||
for (int i = 0; i < 1; i++) {
|
||||
std::cout << i << " / " << Nm << " grid pointer " << evec[i].Grid()
|
||||
@@ -221,7 +192,6 @@ while ( mass > - 2.5){
|
||||
|
||||
int Nconv;
|
||||
IRL.calc(eval, evec, src, Nconv);
|
||||
// IRL.calc(eval, src, Nconv);
|
||||
|
||||
std::cout << mass <<" : " << eval << std::endl;
|
||||
|
||||
@@ -232,17 +202,9 @@ while ( mass > - 2.5){
|
||||
tmp = g5*evec[i];
|
||||
dot = innerProduct(tmp,evec[i]);
|
||||
std::cout << mass << " : " << eval[i] << " " << real(dot) << " " << imag(dot) << std::endl ;
|
||||
// if ( i<1)
|
||||
{
|
||||
std::string evfile ("./evec_"+std::to_string(mass)+"_"+std::to_string(i));
|
||||
auto evdensity = localInnerProduct(evec[i],evec[i] );
|
||||
writeFile(evdensity,evfile);
|
||||
}
|
||||
}
|
||||
src = evec[0]+evec[1]+evec[2];
|
||||
src += evec[3]+evec[4]+evec[5];
|
||||
src += evec[6]+evec[7]+evec[8];
|
||||
mass += LanParams.mstep;
|
||||
mass += -0.1;
|
||||
}
|
||||
|
||||
Grid_finalize();
|
||||
|
||||
@@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.12 FATAL_ERROR)

project(GridViewer)

list(APPEND CMAKE_PREFIX_PATH "/Users/peterboyle/QCD/vtk/VTK-9.4.2-install/")
list(APPEND CMAKE_PREFIX_PATH "/home/paboyle/Visualisation/install/")

find_package(VTK COMPONENTS
CommonColor

@@ -48,15 +48,14 @@ typedef vtkMarchingCubes isosurface;

int mpeg = 0 ;
int xlate = 0 ;
int framerate = 10;

template <class T> void readFile(T& out, std::string const fname){
#ifdef HAVE_LIME
Grid::emptyUserRecord record;
Grid::ScidacReader RD;
RD.open(fname);
RD.readScidacFieldRecord(out,record);
RD.close();
#endif
}
using namespace Grid;

@@ -208,6 +207,10 @@ int main(int argc, char* argv[])
xlate = 1;
}

if( GridCmdOptionExists(argv,argv+argc,"--fps") ){
arg=GridCmdOptionPayload(argv,argv+argc,"--fps");
GridCmdOptionInt(arg,framerate);
}
if( GridCmdOptionExists(argv,argv+argc,"--isosurface") ){
arg=GridCmdOptionPayload(argv,argv+argc,"--isosurface");
GridCmdOptionFloat(arg,default_contour);
@@ -420,7 +423,7 @@ int main(int argc, char* argv[])

vtkFFMPEGWriter *writer = vtkFFMPEGWriter::New();
writer->SetFileName("movie.avi");
writer->SetRate(1);
writer->SetRate(framerate);
writer->SetInputConnection(imageFilter->GetOutputPort());
writer->Start();

@@ -477,7 +480,7 @@ int main(int argc, char* argv[])
slidercallback->fu_list = fu_list;
sliderWidget->AddObserver(vtkCommand::InteractionEvent, slidercallback);

int timerId = iren->CreateRepeatingTimer(300);
int timerId = iren->CreateRepeatingTimer(1000/framerate);
std::cout << "timerId: " << timerId << std::endl;

// Start the interaction and timer

@@ -73,6 +73,21 @@ each to:

VTK really should make it easier to pick up the flags required for FFMPEG linkage, especially as they are very quirky on MacOS.

========================================
Aurora compilation:
========================================
module load ffmpeg
download & untar: VTK-7.0.2
mkdir build
cd build
ccmake ../

"t"
Enable: VTK_MODULE_ENABLE_VTK_IOFFMPEG YES
"configure" ; should "discover" the installed ffmpeg module

Still need an "X" connection to make the MPEG files.


========================================
Grid:

@@ -1,9 +1,17 @@
libs=`grid-config --libs`
ldflags=`grid-config --ldflags`
cxxflags=`grid-config --cxxflags`
cxx=`grid-config --cxx`
export grid_config=/home/paboyle/GPT/install/bin/grid-config
libs=`$grid_config --libs`
ldflags=`$grid_config --ldflags`
cxxflags=`$grid_config --cxxflags`
cxx=`$grid_config --cxx`
cc=icx

mkdir build
cd build

LDFLAGS="$ldflags $libs " cmake .. -DCMAKE_CXX_COMPILER=$cxx -DCMAKE_CXX_FLAGS=$cxxflags
echo CC $cc
echo CXX $cxx
echo CXXFLAGS $cxxflags
echo LDFLAGS $ldflags
echo LIBS $libs

LDFLAGS="$ldflags $libs " cmake .. -DCMAKE_C_COMPILER=$cc -DCMAKE_CXX_COMPILER="$cxx" -DCMAKE_CXX_FLAGS="$cxxflags "