Mirror of https://github.com/paboyle/Grid.git, synced 2025-06-19 08:17:05 +01:00
Merge branch 'develop' into feature/gpu-port
Grid/threads/Pragmas.h (new file, 201 lines)
@@ -0,0 +1,201 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/Threads.h

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once

#ifndef MAX
#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)>(y)?(y):(x))
#endif

#define strong_inline __attribute__((always_inline)) inline

#ifdef _OPENMP
#define GRID_OMP
#include <omp.h>
#endif

#ifdef __NVCC__
#define GRID_NVCC
#endif

#define UNROLL _Pragma("unroll")

//////////////////////////////////////////////////////////////////////////////////
// Old primitives; to be deprecated shortly
//////////////////////////////////////////////////////////////////////////////////

#ifdef GRID_OMP
#define PARALLEL_FOR_LOOP        _Pragma("omp parallel for schedule(static)")
#define PARALLEL_FOR_LOOP_INTERN _Pragma("omp for schedule(static)")
#define PARALLEL_NESTED_LOOP2    _Pragma("omp parallel for collapse(2)")
#define PARALLEL_NESTED_LOOP5    _Pragma("omp parallel for collapse(5)")
#define PARALLEL_REGION          _Pragma("omp parallel")
#define PARALLEL_CRITICAL        _Pragma("omp critical")
#else
#define PARALLEL_FOR_LOOP
#define PARALLEL_FOR_LOOP_INTERN
#define PARALLEL_FOR_LOOP_REDUCE(op, var) /* NB: no OpenMP counterpart is defined above */
#define PARALLEL_NESTED_LOOP2
#define PARALLEL_NESTED_LOOP5
#define PARALLEL_REGION
#define PARALLEL_CRITICAL
#endif

#define parallel_region       PARALLEL_REGION
#define parallel_for          PARALLEL_FOR_LOOP for
#define parallel_for_internal PARALLEL_FOR_LOOP_INTERN for
#define parallel_for_nest2    PARALLEL_NESTED_LOOP2 for
#define parallel_for_nest5    PARALLEL_NESTED_LOOP5 for
#define parallel_critical     PARALLEL_CRITICAL

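// Editor's sketch (not part of the original file): `parallel_for` expands to the
// scheduling pragma followed by the `for` keyword, so the caller writes an
// ordinary loop header and body. N, out, x, y and a are illustrative names:
//
//   parallel_for(int ss=0; ss<N; ss++){
//     out[ss] = a*x[ss] + y[ss];
//   }
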
//////////////////////////////////////////////////////////////////////////////////
// New primitives; explicit host thread calls, and accelerator data parallel calls
//////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_OMP

#define DO_PRAGMA_(x) _Pragma (#x)
#define DO_PRAGMA(x)  DO_PRAGMA_(x)

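// Editor's note (not in the original): the two-level indirection lets macro
// arguments expand before '#' stringises them, so
//   DO_PRAGMA(omp parallel for collapse(2))
// becomes
//   _Pragma("omp parallel for collapse(2)")
// which the compiler treats like "#pragma omp parallel for collapse(2)".
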
#define thread_loop( range , ... )                        DO_PRAGMA(omp parallel for schedule(static)) for range { __VA_ARGS__ ; };
#define thread_loop_in_region( range , ... )              DO_PRAGMA(omp for schedule(static)) for range { __VA_ARGS__ ; };
#define thread_loop_collapse2( range , ... )              DO_PRAGMA(omp parallel for collapse(2)) for range { __VA_ARGS__ };
#define thread_loop_collapse( N , range , ... )           DO_PRAGMA(omp parallel for collapse ( N )) for range { __VA_ARGS__ };
#define thread_loop_collapse_in_region( N , range , ... ) DO_PRAGMA(omp for collapse ( N )) for range { __VA_ARGS__ };
// NB: pass the pragma text unquoted; DO_PRAGMA stringises it itself. Quoting it,
// as in DO_PRAGMA("omp parallel"), would yield _Pragma("\"omp parallel\""), which is invalid.
#define thread_region   DO_PRAGMA(omp parallel)
#define thread_critical DO_PRAGMA(omp critical)
#define thread_num(a)   omp_get_thread_num()
#define thread_max(a)   omp_get_max_threads()

#else
#define thread_loop( range , ... )                        for range { __VA_ARGS__ ; };
#define thread_loop_in_region( range , ... )              for range { __VA_ARGS__ ; };
#define thread_loop_collapse2( range , ... )              for range { __VA_ARGS__ ; };
#define thread_loop_collapse( N , range , ... )           for range { __VA_ARGS__ ; }; /* no space before '(' or this becomes an object-like macro */
#define thread_loop_collapse_in_region( N , range , ... ) for range { __VA_ARGS__ ; };
#define thread_region
#define thread_critical
#define thread_num(a) (0)
#define thread_max(a) (1)
#endif
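
// Editor's sketch (not part of the original file): the loop header is passed as
// one parenthesised argument, the body as the variadic tail. Under GRID_OMP this
// becomes an OpenMP static-schedule loop, otherwise a plain serial loop. N, out,
// x, y and a are illustrative names:
//
//   thread_loop( (int ss=0; ss<N; ss++), {
//     out[ss] = a*x[ss] + y[ss];
//   });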

//////////////////////////////////////////////////////////////////////////////////
// Accelerator primitives; fall back to threading
//////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_NVCC

extern uint32_t gpu_threads;

// Trampoline kernel: thread ss in [0,Num) applies the captured lambda to site base+ss.
template<typename lambda> __global__
void LambdaApply(uint64_t base, uint64_t Num, lambda Lambda)
{
  uint64_t ss = blockIdx.x*blockDim.x + threadIdx.x;
  if ( ss < Num ) {
    Lambda(ss+base);
  }
}

#define accelerator        __host__ __device__
#define accelerator_inline __host__ __device__ inline

#define accelerator_loop( iterator, range, ... )                \
  typedef decltype(range.begin()) Iterator;                     \
  auto lambda = [=] accelerator (Iterator iterator) mutable {   \
    __VA_ARGS__;                                                \
  };                                                            \
  Iterator num  = range.end() - range.begin();                  \
  Iterator base = range.begin();                                \
  Iterator num_block = (num+gpu_threads-1)/gpu_threads;         \
  LambdaApply<<<num_block,gpu_threads>>>(base,num,lambda);      \
  cudaDeviceSynchronize();                                      \
  cudaError err = cudaGetLastError();                           \
  if ( cudaSuccess != err ) {                                   \
    printf("Cuda error %s\n",cudaGetErrorString( err ));        \
    exit(1); /* was exit(0); a failure should not report success */ \
  }

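// Editor's sketch (not part of the original file), assuming `range` exposes
// integer-valued begin()/end() and out, x, y, a are illustrative device data:
//
//   accelerator_loop( ss, range, {
//     out[ss] = a*x[ss] + y[ss];
//   });
//
// Caveat: the macro declares Iterator, lambda, num, base and num_block in the
// enclosing scope, so it cannot be invoked twice in the same scope; the body is
// captured by value into a __host__ __device__ lambda.
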
#define accelerator_loopN( iterator, num, ... )                 \
  typedef decltype(num) Iterator;                               \
  if ( num > 0 ) {                                              \
    auto lambda = [=] accelerator (Iterator iterator) mutable { \
      __VA_ARGS__;                                              \
    };                                                          \
    Iterator base = 0;                                          \
    Iterator num_block = (num+gpu_threads-1)/gpu_threads;       \
    LambdaApply<<<num_block,gpu_threads>>>(base,num,lambda);    \
    cudaDeviceSynchronize();                                    \
    cudaError err = cudaGetLastError();                         \
    if ( cudaSuccess != err ) {                                 \
      printf("Cuda error %s\n",cudaGetErrorString( err ));      \
      exit(1); /* was exit(0) */                                \
    }                                                           \
  }

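// Editor's note (not in the original): accelerator_loopN is the explicit-count
// variant; the num>0 guard skips the kernel launch entirely for empty loops.
// Nsites, out and in are illustrative names:
//
//   accelerator_loopN( ss, Nsites, { out[ss] = in[ss]; } );
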
#define cpu_loop( iterator, range, ... ) thread_loop( (auto iterator = range.begin();iterator<range.end();iterator++), { __VA_ARGS__ });

// 2D trampoline: each group of Isites consecutive threads maps to the same outer
// site index osite and calls the lambda; lane selection is implicit in the lambda.
template<typename lambda> __global__
void LambdaApply2D(uint64_t Osites, uint64_t Isites, lambda Lambda)
{
  uint64_t site  = threadIdx.x + blockIdx.x*blockDim.x;
  uint64_t osite = site / Isites;
  if ( osite < Osites ) {
    Lambda(osite);
  }
}

#define coalesce_loop( iterator, range, nsimd, ... )            \
  typedef uint64_t Iterator;                                    \
  auto lambda = [=] accelerator (Iterator iterator) mutable {   \
    __VA_ARGS__;                                                \
  };                                                            \
  Iterator num  = range.end() - range.begin();                  \
  Iterator base = range.begin(); /* NB: unused; LambdaApply2D does not offset by base */ \
  Iterator cu_threads = gpu_threads;                            \
  Iterator cu_blocks  = (num*nsimd+cu_threads-1)/cu_threads; /* round up; plain division could drop trailing sites */ \
  LambdaApply2D<<<cu_blocks,cu_threads>>>(num,(uint64_t)nsimd,lambda); \
  cudaDeviceSynchronize();                                      \
  cudaError err = cudaGetLastError();                           \
  if ( cudaSuccess != err ) {                                   \
    printf("Cuda error %s\n",cudaGetErrorString( err ));        \
    exit(1); /* was exit(0) */                                  \
  }

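// Editor's note (not in the original): with nsimd lanes per outer site, the
// nsimd consecutive CUDA threads that share one osite issue lane-adjacent
// memory accesses, which coalesce. An illustrative call:
//
//   coalesce_loop( ss, range, Nsimd, {
//     out[ss] = x[ss];   // ss is the *outer* site index; lanes are implicit
//   });
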
#else

#define accelerator
#define accelerator_inline strong_inline

#define accelerator_loop( iterator, range, ... )                \
  thread_loop( (auto iterator = range.begin();iterator<range.end();iterator++), { __VA_ARGS__ });

#define accelerator_loopN( iterator, num, ... )                 \
  thread_loop( (auto iterator = 0;iterator<num;iterator++), { __VA_ARGS__ });

#define cpu_loop( iterator, range, ... )                        \
  thread_loop( (auto iterator = range.begin();iterator<range.end();iterator++), { __VA_ARGS__ });

// NB: no fallback coalesce_loop is defined here; it is currently GRID_NVCC-only.
#endif
Grid/threads/Threads.h (new file, 128 lines)
@@ -0,0 +1,128 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/Threads.h

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once

// A class to obtain deterministic, bit-reproducible reductions.
// All members are static; arguably a namespace would suffice.
NAMESPACE_BEGIN(Grid);

class GridThread {
public:
  static int _threads;
  static int _hyperthreads;
  static int _cores;

  static void SetCores(int cr) {
#ifdef GRID_OMP
    _cores = cr;
#else
    _cores = 1;
#endif
  }
  static void SetThreads(int thr) {
#ifdef GRID_OMP
    _threads = MIN(thr,omp_get_max_threads());
    omp_set_num_threads(_threads);
#else
    _threads = 1;
#endif
  };
  static void SetMaxThreads(void) {
#ifdef GRID_OMP
    _threads = omp_get_max_threads();
    omp_set_num_threads(_threads);
#else
    _threads = 1;
#endif
  };
  static int GetHyperThreads(void) { assert(_threads % _cores == 0); return _threads/_cores; };
  static int GetCores(void)        { return _cores; };
  static int GetThreads(void)      { return _threads; };
  static int SumArraySize(void)    { return _threads; };

  static void GetWork(int nwork, int me, int & mywork, int & myoff) {
    GetWork(nwork,me,mywork,myoff,_threads);
  }
  static void GetWork(int nwork, int me, int & mywork, int & myoff, int units) {
    int basework = nwork/units;          // minimum items per unit
    int backfill = units-(nwork%units);  // units with me >= backfill take one extra item
    if ( me >= units ) {
      mywork = myoff = 0;
    } else {
      mywork = (nwork+me)/units;         // basework, plus 1 when me >= backfill
      myoff  = basework * me;
      if ( me > backfill )
        myoff += (me-backfill);          // shift past the extra items of earlier units
    }
    return;
  };

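  // Editor's note, worked example (not in the original): nwork=10, units=4 gives
  // basework=2 and backfill=4-(10%4)=2, so (mywork,myoff) per unit is
  // (2,0), (2,2), (3,4), (3,7): offsets 0,2,4,7 tile all 10 items contiguously.
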
  static void GetWorkBarrier(int nwork, int &me, int & mywork, int & myoff) {
    me = ThreadBarrier();
    GetWork(nwork,me,mywork,myoff);
  };

  static int ThreadBarrier(void) {
#ifdef GRID_OMP
#pragma omp barrier
    return omp_get_thread_num();
#else
    return 0;
#endif
  };

  // Deterministic reduction: every thread deposits its partial value, then all
  // threads accumulate the array in the same index order, so the result is
  // bit-reproducible regardless of scheduling.
  template<class obj> static void ThreadSum(std::vector<obj> &sum_array, obj &val, int me) {
    sum_array[me] = val;
    val = Zero();
    ThreadBarrier();
    for(int i=0;i<_threads;i++) val += sum_array[i];
    ThreadBarrier();
  }

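  // Editor's sketch (not part of the original file): a deterministic sum over a
  // hypothetical array `data` of length N, assuming obj=double supports the
  // Zero() assignment used above:
  //
  //   std::vector<double> partial(GridThread::SumArraySize());
  //   thread_region {
  //     double sum = 0.0;
  //     int me, mywork, myoff;
  //     GridThread::GetWorkBarrier(N, me, mywork, myoff);
  //     for(int i=myoff; i<myoff+mywork; i++) sum += data[i];
  //     GridThread::ThreadSum(partial, sum, me);  // sum now holds the global total
  //   }
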
  static void bcopy(const void *src, void *dst, size_t len) {
#ifdef GRID_OMP
#pragma omp parallel
    {
      const char *c_src  = (char *) src;
      char       *c_dest = (char *) dst;
      int me, mywork, myoff;
      GridThread::GetWorkBarrier(len, me, mywork, myoff);
      // '::' qualification selects the libc bcopy; an unqualified call would
      // resolve back to this member and recurse.
      ::bcopy(&c_src[myoff], &c_dest[myoff], mywork);
    }
#else
    ::bcopy(src, dst, len);
#endif
  }

};

NAMESPACE_END(Grid);