#include <cstdio>
#include <cstdlib>
#include <cstdint>
#include <cstring>
#include <cassert>
#include <complex>
#include <vector>
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <Eigen/Dense>

// Backend headers (paths may differ by toolchain version)
#ifdef GRID_HIP
#include <hip/hip_runtime.h>
#include <hipblas/hipblas.h>
#endif
#ifdef GRID_CUDA
#include <cublas_v2.h>
#endif
#ifdef GRID_SYCL
#include <CL/sycl.hpp>
#endif

#ifdef GRID_SYCL
#include <oneapi/mkl.hpp>

cl::sycl::queue *theAccelerator;

void acceleratorInit(void)
{
  int nDevices = 1;
  cl::sycl::gpu_selector selector;
  cl::sycl::device selectedDevice { selector };
  theAccelerator = new sycl::queue (selectedDevice);
  auto name = theAccelerator->get_device().get_info<cl::sycl::info::device::name>();
  printf("AcceleratorSyclInit: Selected device is %s\n",name.c_str());
  fflush(stdout);
}
inline void *acceleratorAllocDevice(size_t bytes){ return malloc_device(bytes,*theAccelerator);};
inline void acceleratorFreeDevice(void *ptr){free(ptr,*theAccelerator);};
inline void acceleratorFreeDevice(void *ptr,size_t bytes){free(ptr,*theAccelerator);};
inline void acceleratorMemSet(void *base,int value,size_t bytes) { theAccelerator->memset(base,value,bytes); theAccelerator->wait();}
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { theAccelerator->memcpy(to,from,bytes); theAccelerator->wait();}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ theAccelerator->memcpy(to,from,bytes); theAccelerator->wait();}
#define accelerator_barrier(dummy) { theAccelerator->wait(); }
#endif

#ifdef GRID_HIP
hipStream_t copyStream;
hipStream_t computeStream;
void acceleratorInit(void)
{
  int device = 0;
  auto discard = hipSetDevice(device);
  discard = hipStreamCreate(&copyStream);
  discard = hipStreamCreate(&computeStream);
  printf("AcceleratorHIPInit\n");
}
inline void *acceleratorAllocDevice(size_t bytes)
{
  void *ptr=NULL;
  auto err = hipMalloc((void **)&ptr,bytes);
  if( err != hipSuccess ) {
    ptr = (void *) NULL;
    fprintf(stderr," hipMalloc failed for %ld %s \n",bytes,hipGetErrorString(err)); fflush(stderr);
  }
  return ptr;
};
inline void acceleratorFreeDevice(void *ptr,size_t bytes){ auto discard=hipFree(ptr);};
inline void acceleratorFreeDevice(void *ptr){ auto discard=hipFree(ptr);};
inline void acceleratorMemSet(void *base,int value,size_t bytes) { auto discard=hipMemset(base,value,bytes);}
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { auto discard=hipMemcpy(to,from,bytes, hipMemcpyHostToDevice);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ auto discard=hipMemcpy(to,from,bytes, hipMemcpyDeviceToHost);}
#define accelerator_barrier(dummy)                                      \
  {                                                                     \
    auto tmp=hipStreamSynchronize(computeStream);                       \
    auto err = hipGetLastError();                                       \
    if ( err != hipSuccess ) {                                          \
      printf("After hipDeviceSynchronize() : HIP error %s \n", hipGetErrorString( err )); \
      puts(__FILE__);                                                   \
      printf("Line %d\n",__LINE__);                                     \
      exit(0);                                                          \
    }                                                                   \
  }
#endif
#ifdef GRID_CUDA
cudaStream_t copyStream;
cudaStream_t computeStream;
static const int acceleratorAbortOnGpuError = 1; // abort when the barrier detects an error
void acceleratorInit(void)
{
  int device = 0;
  cudaSetDevice(device);
  cudaStreamCreate(&copyStream);
  cudaStreamCreate(&computeStream);
}
inline void *acceleratorAllocDevice(size_t bytes)
{
  void *ptr=NULL;
  auto err = cudaMalloc((void **)&ptr,bytes);
  if( err != cudaSuccess ) {
    ptr = (void *) NULL;
    printf(" cudaMalloc failed for %lu %s \n",(unsigned long)bytes,cudaGetErrorString(err));
  }
  return ptr;
};
inline void acceleratorFreeShared(void *ptr){ cudaFree(ptr);};
inline void acceleratorFreeDevice(void *ptr){ cudaFree(ptr);};
inline void acceleratorFreeDevice(void *ptr,size_t bytes){ cudaFree(ptr);};
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { cudaMemcpy(to,from,bytes, cudaMemcpyHostToDevice);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ cudaMemcpy(to,from,bytes, cudaMemcpyDeviceToHost);}
inline void acceleratorMemSet(void *base,int value,size_t bytes) { cudaMemset(base,value,bytes);}
#define accelerator_barrier(dummy)                              \
  {                                                             \
    cudaStreamSynchronize(computeStream);                       \
    cudaError err = cudaGetLastError();                         \
    if ( cudaSuccess != err ) {                                 \
      printf("accelerator_barrier(): Cuda error %s \n",         \
             cudaGetErrorString( err ));                        \
      printf("File %s Line %d\n",__FILE__,__LINE__);            \
      fflush(stdout);                                           \
      if (acceleratorAbortOnGpuError) assert(err==cudaSuccess); \
    }                                                           \
  }
#endif

template<class T> void acceleratorPut(T& dev,T&host)
{
  acceleratorCopyToDevice(&host,&dev,sizeof(T));
}
template<class T> T acceleratorGet(T& dev)
{
  T host;
  acceleratorCopyFromDevice(&dev,&host,sizeof(T));
  return host;
}

/**************************************************************
 * Allocator
 **************************************************************
 */
template<typename _Tp>
class devAllocator {
public:
  typedef std::size_t     size_type;
  typedef std::ptrdiff_t  difference_type;
  typedef _Tp*       pointer;
  typedef const _Tp* const_pointer;
  typedef _Tp&       reference;
  typedef const _Tp& const_reference;
  typedef _Tp        value_type;

  template<typename _Tp1> struct rebind { typedef devAllocator<_Tp1> other; };
  devAllocator() throw() { }
  devAllocator(const devAllocator&) throw() { }
  template<typename _Tp1> devAllocator(const devAllocator<_Tp1>&) throw() { }
  ~devAllocator() throw() { }
  pointer address(reference __x) const { return &__x; }
  size_type  max_size() const throw() { return size_t(-1) / sizeof(_Tp); }

  pointer allocate(size_type __n, const void* _p= 0)
  {
    size_type bytes = __n*sizeof(_Tp);
    _Tp *ptr = (_Tp*) acceleratorAllocDevice(bytes);
    if ( (_Tp*)ptr == (_Tp *) NULL ) {
      printf("Grid Device Allocator got NULL for %lu bytes\n",(unsigned long) bytes );
    }
    assert( ( (_Tp*)ptr != (_Tp *)NULL ) );
    return ptr;
  }

  void deallocate(pointer __p, size_type __n)
  {
    size_type bytes = __n * sizeof(_Tp);
    acceleratorFreeDevice((void *)__p,bytes);
  }
  void construct(pointer __p, const _Tp& __val) { };
  void construct(pointer __p) { };
  void destroy(pointer __p) { };
};

template<class T> using deviceVector = std::vector<T,devAllocator<T> >;

/**************************************************************
 * Microsecond timer
 **************************************************************
 */
inline double usecond(void) {
  struct timeval tv;
  gettimeofday(&tv,NULL);
  return 1.0e6*tv.tv_sec + 1.0*tv.tv_usec;
}

typedef float  RealF;
typedef double RealD;
typedef std::complex<RealF> ComplexF;
typedef std::complex<RealD> ComplexD;

// Simple serial loop used by the Eigen reference path when no GPU backend is enabled
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
#define thread_for(i,num,...) for(int64_t i=0;i<(int64_t)(num);i++) { __VA_ARGS__ }
#endif

///////////////////////////////////////////////////////////////////////
// Need to rearrange lattice data to be in the right format for a
// batched multiply. Might as well make these static, dense packed
///////////////////////////////////////////////////////////////////////
#ifdef GRID_HIP
typedef hipblasHandle_t gridblasHandle_t;
#endif
#ifdef GRID_CUDA
typedef cublasHandle_t gridblasHandle_t;
#endif
#ifdef GRID_SYCL
typedef cl::sycl::queue *gridblasHandle_t;
#endif
#ifdef GRID_ONE_MKL
typedef cl::sycl::queue *gridblasHandle_t;
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP) && !defined(GRID_ONE_MKL)
typedef int32_t gridblasHandle_t;
#endif

enum GridBLASOperation_t { GridBLAS_OP_N, GridBLAS_OP_T, GridBLAS_OP_C } ;
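// Illustrative sketch, not part of the benchmark and compile-disabled: how the portability
// layer above (acceleratorInit, deviceVector, the copy/memset helpers and acceleratorPut/Get)
// is meant to be used. Buffer sizes and values here are arbitrary examples, and a GPU backend
// (GRID_CUDA, GRID_HIP or GRID_SYCL) is assumed so the device allocator exists.
#if 0
inline void deviceVectorExample(void)
{
  acceleratorInit();                         // select the device, create streams / queue
  deviceVector<ComplexD> d(16);              // 16 complex doubles in device memory via devAllocator
  std::vector<ComplexD>  h(16, ComplexD(1.0,0.0));
  acceleratorCopyToDevice(&h[0],&d[0],16*sizeof(ComplexD));    // host -> device
  acceleratorMemSet(&d[8],0,8*sizeof(ComplexD));               // clear the second half on device
  acceleratorCopyFromDevice(&d[0],&h[0],16*sizeof(ComplexD));  // device -> host
  ComplexD first = acceleratorGet(d[0]);     // single element read-back, as used by the benchmark
  printf("d[0] = (%f,%f)\n",first.real(),first.imag());
}
#endif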
class GridBLAS {
public:

  static gridblasHandle_t gridblasHandle;
  static int              gridblasInit;

  static void Init(void)
  {
    if ( ! gridblasInit ) {
#ifdef GRID_CUDA
      std::cout << "cublasCreate"<<std::endl;
      cublasCreate(&gridblasHandle);
      cublasSetPointerMode(gridblasHandle, CUBLAS_POINTER_MODE_DEVICE); // alpha,beta live on device
#endif
#ifdef GRID_HIP
      std::cout << "hipblasCreate"<<std::endl;
      hipblasCreate(&gridblasHandle);
      hipblasSetPointerMode(gridblasHandle, HIPBLAS_POINTER_MODE_DEVICE);
#endif
#ifdef GRID_SYCL
      gridblasHandle = theAccelerator;
#endif
      gridblasInit = 1;
    }
  }

  // Force construct once
  GridBLAS() { Init(); };
  ~GridBLAS() { };

  void synchronise(void)
  {
#ifdef GRID_HIP
    auto err = hipDeviceSynchronize();
    assert(err==hipSuccess);
#endif
#ifdef GRID_CUDA
    auto err = cudaDeviceSynchronize();
    assert(err==cudaSuccess);
#endif
#ifdef GRID_SYCL
    gridblasHandle->wait();
#endif
  }

  /////////////////////////////////////////////////////////////
  // Single matrix GEMM -- fp64 and fp32
  /////////////////////////////////////////////////////////////
  void gemm(GridBLASOperation_t OpA,
            GridBLASOperation_t OpB,
            int m,int n, int k,
            ComplexD alpha,
            ComplexD* Amk,  // Device pointer
            ComplexD* Bkn,
            ComplexD beta,
            ComplexD* Cmn)
  {
    RealD t2=usecond();

    assert(OpA!=GridBLAS_OP_T); // Complex case expect no transpose
    assert(OpB!=GridBLAS_OP_T);

    int lda = m; // m x k column major
    int ldb = k; // k x n column major
    int ldc = m; // m x n column major
    if(OpA!=GridBLAS_OP_N) lda = k;
    if(OpB!=GridBLAS_OP_N) ldb = n;

    static deviceVector<ComplexD> alpha_p(1);
    static deviceVector<ComplexD> beta_p(1);
    // can prestore the 1 and the zero on device
    acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(ComplexD));
    acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(ComplexD));
    RealD t0=usecond();

#ifdef GRID_HIP
    hipblasOperation_t hOpA;
    hipblasOperation_t hOpB;
    if ( OpA == GridBLAS_OP_N ) hOpA = HIPBLAS_OP_N;
    if ( OpA == GridBLAS_OP_T ) hOpA = HIPBLAS_OP_T;
    if ( OpA == GridBLAS_OP_C ) hOpA = HIPBLAS_OP_C;
    if ( OpB == GridBLAS_OP_N ) hOpB = HIPBLAS_OP_N;
    if ( OpB == GridBLAS_OP_T ) hOpB = HIPBLAS_OP_T;
    if ( OpB == GridBLAS_OP_C ) hOpB = HIPBLAS_OP_C;
    auto err = hipblasZgemm(gridblasHandle,
                            hOpA, hOpB,
                            m,n,k,
                            (hipblasDoubleComplex *) &alpha_p[0],
                            (hipblasDoubleComplex *) Amk, lda,
                            (hipblasDoubleComplex *) Bkn, ldb,
                            (hipblasDoubleComplex *) &beta_p[0],
                            (hipblasDoubleComplex *) Cmn, ldc);
    assert(err==HIPBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_CUDA
    cublasOperation_t hOpA;
    cublasOperation_t hOpB;
    if ( OpA == GridBLAS_OP_N ) hOpA = CUBLAS_OP_N;
    if ( OpA == GridBLAS_OP_T ) hOpA = CUBLAS_OP_T;
    if ( OpA == GridBLAS_OP_C ) hOpA = CUBLAS_OP_C;
    if ( OpB == GridBLAS_OP_N ) hOpB = CUBLAS_OP_N;
    if ( OpB == GridBLAS_OP_T ) hOpB = CUBLAS_OP_T;
    if ( OpB == GridBLAS_OP_C ) hOpB = CUBLAS_OP_C;
    auto err = cublasZgemm(gridblasHandle,
                           hOpA, hOpB,
                           m,n,k,
                           (cuDoubleComplex *) &alpha_p[0],
                           (cuDoubleComplex *) Amk, lda,
                           (cuDoubleComplex *) Bkn, ldb,
                           (cuDoubleComplex *) &beta_p[0],
                           (cuDoubleComplex *) Cmn, ldc);
    assert(err==CUBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_SYCL
    int64_t m64=m;
    int64_t n64=n;
    int64_t k64=k;
    int64_t lda64=lda;
    int64_t ldb64=ldb;
    int64_t ldc64=ldc;

    oneapi::mkl::transpose iOpA;
    oneapi::mkl::transpose iOpB;
    if ( OpA == GridBLAS_OP_N ) iOpA = oneapi::mkl::transpose::N;
    if ( OpA == GridBLAS_OP_T ) iOpA = oneapi::mkl::transpose::T;
    if ( OpA == GridBLAS_OP_C ) iOpA = oneapi::mkl::transpose::C;
    if ( OpB == GridBLAS_OP_N ) iOpB = oneapi::mkl::transpose::N;
    if ( OpB == GridBLAS_OP_T ) iOpB = oneapi::mkl::transpose::T;
    if ( OpB == GridBLAS_OP_C ) iOpB = oneapi::mkl::transpose::C;

    oneapi::mkl::blas::column_major::gemm(*gridblasHandle, iOpA, iOpB,
                                          m64,n64,k64,
                                          (ComplexD *) &alpha_p[0],
                                          (const ComplexD *)Amk, (int64_t )lda64,
                                          (const ComplexD *)Bkn, (int64_t )ldb64,
                                          (ComplexD *) &beta_p[0],
                                          (ComplexD *)Cmn, (int64_t)ldc64);
    synchronise();
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
    // Need a default/reference implementation; use Eigen
    if ( (OpA == GridBLAS_OP_N ) && (OpB == GridBLAS_OP_N) ) {
      Eigen::Map<Eigen::MatrixXcd> eAmk(Amk,m,k);
      Eigen::Map<Eigen::MatrixXcd> eBkn(Bkn,k,n);
      Eigen::Map<Eigen::MatrixXcd> eCmn(Cmn,m,n);
      eCmn = beta * eCmn + alpha * eAmk * eBkn ;
    } else if ( (OpA == GridBLAS_OP_C ) && (OpB == GridBLAS_OP_N) ) {
      Eigen::Map<Eigen::MatrixXcd> eAmk(Amk,k,m);
      Eigen::Map<Eigen::MatrixXcd> eBkn(Bkn,k,n);
      Eigen::Map<Eigen::MatrixXcd> eCmn(Cmn,m,n);
      eCmn = beta * eCmn + alpha * eAmk.adjoint() * eBkn ;
    }
    else if ( (OpA == GridBLAS_OP_N ) && (OpB == GridBLAS_OP_C) ) {
      Eigen::Map<Eigen::MatrixXcd> eAmk(Amk,m,k);
      Eigen::Map<Eigen::MatrixXcd> eBkn(Bkn,n,k);
      Eigen::Map<Eigen::MatrixXcd> eCmn(Cmn,m,n);
      eCmn = beta * eCmn + alpha * eAmk * eBkn.adjoint() ;
    } else if ( (OpA == GridBLAS_OP_C ) && (OpB == GridBLAS_OP_C) ) {
      Eigen::Map<Eigen::MatrixXcd> eAmk(Amk,k,m);
      Eigen::Map<Eigen::MatrixXcd> eBkn(Bkn,n,k);
      Eigen::Map<Eigen::MatrixXcd> eCmn(Cmn,m,n);
      eCmn = beta * eCmn + alpha * eAmk.adjoint() * eBkn.adjoint() ;
    } else {
      assert(0);
    }
#endif
    RealD t1=usecond();
    RealD flops = 8.0*m*n*k;
    RealD bytes = 1.0*sizeof(ComplexD)*(m*k+k*n+m*n);
  }

  void gemm(GridBLASOperation_t OpA,
            GridBLASOperation_t OpB,
            int m,int n, int k,
            ComplexF alpha,
            ComplexF* Amk,  // Device pointer
            ComplexF* Bkn,
            ComplexF beta,
            ComplexF* Cmn)
  {
    RealD t2=usecond();

    assert(OpA!=GridBLAS_OP_T); // Complex case expect no transpose
    assert(OpB!=GridBLAS_OP_T);

    int lda = m; // m x k column major
    int ldb = k; // k x n column major
    int ldc = m; // m x n column major
    if(OpA!=GridBLAS_OP_N) lda = k;
    if(OpB!=GridBLAS_OP_N) ldb = n;

    static deviceVector<ComplexF> alpha_p(1);
    static deviceVector<ComplexF> beta_p(1);
    // can prestore the 1 and the zero on device
    acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(ComplexF));
    acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(ComplexF));
    RealD t0=usecond();

#ifdef GRID_HIP
    hipblasOperation_t hOpA;
    hipblasOperation_t hOpB;
    if ( OpA == GridBLAS_OP_N ) hOpA = HIPBLAS_OP_N;
    if ( OpA == GridBLAS_OP_T ) hOpA = HIPBLAS_OP_T;
    if ( OpA == GridBLAS_OP_C ) hOpA = HIPBLAS_OP_C;
    if ( OpB == GridBLAS_OP_N ) hOpB = HIPBLAS_OP_N;
    if ( OpB == GridBLAS_OP_T ) hOpB = HIPBLAS_OP_T;
    if ( OpB == GridBLAS_OP_C ) hOpB = HIPBLAS_OP_C;
    auto err = hipblasCgemm(gridblasHandle,
                            hOpA, hOpB,
                            m,n,k,
                            (hipblasComplex *) &alpha_p[0],
                            (hipblasComplex *) Amk, lda,
                            (hipblasComplex *) Bkn, ldb,
                            (hipblasComplex *) &beta_p[0],
                            (hipblasComplex *) Cmn, ldc);
    assert(err==HIPBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_CUDA
    cublasOperation_t hOpA;
    cublasOperation_t hOpB;
    if ( OpA == GridBLAS_OP_N ) hOpA = CUBLAS_OP_N;
    if ( OpA == GridBLAS_OP_T ) hOpA = CUBLAS_OP_T;
    if ( OpA == GridBLAS_OP_C ) hOpA = CUBLAS_OP_C;
    if ( OpB == GridBLAS_OP_N ) hOpB = CUBLAS_OP_N;
    if ( OpB == GridBLAS_OP_T ) hOpB = CUBLAS_OP_T;
    if ( OpB == GridBLAS_OP_C ) hOpB = CUBLAS_OP_C;
    auto err = cublasCgemm(gridblasHandle,
                           hOpA, hOpB,
                           m,n,k,
                           (cuComplex *) &alpha_p[0],
                           (cuComplex *) Amk, lda,
                           (cuComplex *) Bkn, ldb,
                           (cuComplex *) &beta_p[0],
                           (cuComplex *) Cmn, ldc);
    assert(err==CUBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_SYCL
    int64_t m64=m;
    int64_t n64=n;
    int64_t k64=k;
    int64_t lda64=lda;
    int64_t ldb64=ldb;
    int64_t ldc64=ldc;

    oneapi::mkl::transpose iOpA;
    oneapi::mkl::transpose iOpB;
    if ( OpA == GridBLAS_OP_N ) iOpA = oneapi::mkl::transpose::N;
    if ( OpA == GridBLAS_OP_T ) iOpA = oneapi::mkl::transpose::T;
    if ( OpA == GridBLAS_OP_C ) iOpA = oneapi::mkl::transpose::C;
    if ( OpB == GridBLAS_OP_N ) iOpB = oneapi::mkl::transpose::N;
    if ( OpB == GridBLAS_OP_T ) iOpB = oneapi::mkl::transpose::T;
    if ( OpB == GridBLAS_OP_C ) iOpB = oneapi::mkl::transpose::C;

    oneapi::mkl::blas::column_major::gemm(*gridblasHandle, iOpA, iOpB,
                                          m64,n64,k64,
                                          (ComplexF *) &alpha_p[0],
                                          (const ComplexF *)Amk, (int64_t )lda64,
                                          (const ComplexF *)Bkn, (int64_t )ldb64,
                                          (ComplexF *) &beta_p[0],
                                          (ComplexF *)Cmn, (int64_t )ldc64);
    synchronise();
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
    // Need a default/reference implementation; use Eigen
    if ( (OpA == GridBLAS_OP_N ) && (OpB == GridBLAS_OP_N) ) {
      Eigen::Map<Eigen::MatrixXcf> eAmk(Amk,m,k);
      Eigen::Map<Eigen::MatrixXcf> eBkn(Bkn,k,n);
      Eigen::Map<Eigen::MatrixXcf> eCmn(Cmn,m,n);
      eCmn = beta * eCmn + alpha * eAmk * eBkn ;
    }
    else if ( (OpA == GridBLAS_OP_C ) && (OpB == GridBLAS_OP_N) ) {
      Eigen::Map<Eigen::MatrixXcf> eAmk(Amk,k,m);
      Eigen::Map<Eigen::MatrixXcf> eBkn(Bkn,k,n);
      Eigen::Map<Eigen::MatrixXcf> eCmn(Cmn,m,n);
      eCmn = beta * eCmn + alpha * eAmk.adjoint() * eBkn ;
    } else if ( (OpA == GridBLAS_OP_N ) && (OpB == GridBLAS_OP_C) ) {
      Eigen::Map<Eigen::MatrixXcf> eAmk(Amk,m,k);
      Eigen::Map<Eigen::MatrixXcf> eBkn(Bkn,n,k);
      Eigen::Map<Eigen::MatrixXcf> eCmn(Cmn,m,n);
      eCmn = beta * eCmn + alpha * eAmk * eBkn.adjoint() ;
    } else if ( (OpA == GridBLAS_OP_C ) && (OpB == GridBLAS_OP_C) ) {
      Eigen::Map<Eigen::MatrixXcf> eAmk(Amk,k,m);
      Eigen::Map<Eigen::MatrixXcf> eBkn(Bkn,n,k);
      Eigen::Map<Eigen::MatrixXcf> eCmn(Cmn,m,n);
      eCmn = beta * eCmn + alpha * eAmk.adjoint() * eBkn.adjoint() ;
    } else {
      assert(0);
    }
#endif
    RealD t1=usecond();
    RealD flops = 8.0*m*n*k;
    RealD bytes = 1.0*sizeof(ComplexF)*(m*k+k*n+m*n);
  }

  /////////////////////////////////////////////////////////////
  void gemmBatched(int m,int n, int k,
                   ComplexD alpha,
                   deviceVector<ComplexD*> &Amk,  // pointer list to matrices
                   deviceVector<ComplexD*> &Bkn,
                   ComplexD beta,
                   deviceVector<ComplexD*> &Cmn)
  {
    gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,
                m,n,k,
                alpha,
                Amk,
                Bkn,
                beta,
                Cmn);
  }
  void gemmBatched(int m,int n, int k,
                   ComplexF alpha,
                   deviceVector<ComplexF*> &Amk,  // pointer list to matrices
                   deviceVector<ComplexF*> &Bkn,
                   ComplexF beta,
                   deviceVector<ComplexF*> &Cmn)
  {
    gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,
                m,n,k,
                alpha,
                Amk,
                Bkn,
                beta,
                Cmn);
  }

  void gemmBatched(GridBLASOperation_t OpA,
                   GridBLASOperation_t OpB,
                   int m,int n, int k,
                   ComplexD alpha,
                   deviceVector<ComplexD*> &Amk,  // pointer list to matrices
                   deviceVector<ComplexD*> &Bkn,
                   ComplexD beta,
                   deviceVector<ComplexD*> &Cmn)
  {
    RealD t2=usecond();
    int32_t batchCount = Amk.size();
    assert(Bkn.size()==batchCount);
    assert(Cmn.size()==batchCount);

    assert(OpA!=GridBLAS_OP_T); // Complex case expect no transpose
    assert(OpB!=GridBLAS_OP_T);

    int lda = m; // m x k column major
    int ldb = k; // k x n column major
    int ldc = m; // m x n column major
    if(OpA!=GridBLAS_OP_N) lda = k;
    if(OpB!=GridBLAS_OP_N) ldb = n;

    static deviceVector<ComplexD> alpha_p(1);
    static deviceVector<ComplexD> beta_p(1);
    // can prestore the 1 and the zero on device
    acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(ComplexD));
    acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(ComplexD));
    RealD t0=usecond();
    //    std::cout << "ZgemmBatched mnk "<<m<<","<<n<<","<<k<<" batch "<<batchCount<<std::endl;

#ifdef GRID_HIP
    hipblasOperation_t hOpA;
    hipblasOperation_t hOpB;
    if ( OpA == GridBLAS_OP_N ) hOpA = HIPBLAS_OP_N;
    if ( OpA == GridBLAS_OP_T ) hOpA = HIPBLAS_OP_T;
    if ( OpA == GridBLAS_OP_C ) hOpA = HIPBLAS_OP_C;
    if ( OpB == GridBLAS_OP_N ) hOpB = HIPBLAS_OP_N;
    if ( OpB == GridBLAS_OP_T ) hOpB = HIPBLAS_OP_T;
    if ( OpB == GridBLAS_OP_C ) hOpB = HIPBLAS_OP_C;
    auto err = hipblasZgemmBatched(gridblasHandle,
                                   hOpA, hOpB,
                                   m,n,k,
                                   (hipblasDoubleComplex *) &alpha_p[0],
                                   (hipblasDoubleComplex **)&Amk[0], lda,
                                   (hipblasDoubleComplex **)&Bkn[0], ldb,
                                   (hipblasDoubleComplex *) &beta_p[0],
                                   (hipblasDoubleComplex **)&Cmn[0], ldc,
                                   batchCount);
    assert(err==HIPBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_CUDA
    cublasOperation_t hOpA;
    cublasOperation_t hOpB;
    if ( OpA == GridBLAS_OP_N ) hOpA = CUBLAS_OP_N;
    if ( OpA == GridBLAS_OP_T ) hOpA = CUBLAS_OP_T;
    if ( OpA == GridBLAS_OP_C ) hOpA = CUBLAS_OP_C;
    if ( OpB == GridBLAS_OP_N ) hOpB = CUBLAS_OP_N;
    if ( OpB == GridBLAS_OP_T ) hOpB = CUBLAS_OP_T;
    if ( OpB == GridBLAS_OP_C ) hOpB = CUBLAS_OP_C;
    auto err = cublasZgemmBatched(gridblasHandle,
                                  hOpA, hOpB,
                                  m,n,k,
                                  (cuDoubleComplex *) &alpha_p[0],
                                  (cuDoubleComplex **)&Amk[0], lda,
                                  (cuDoubleComplex **)&Bkn[0], ldb,
                                  (cuDoubleComplex *) &beta_p[0],
                                  (cuDoubleComplex **)&Cmn[0], ldc,
                                  batchCount);
    assert(err==CUBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_SYCL
    int64_t m64=m;
    int64_t n64=n;
    int64_t k64=k;
    int64_t lda64=lda;
    int64_t ldb64=ldb;
    int64_t ldc64=ldc;
    int64_t batchCount64=batchCount;

    oneapi::mkl::transpose iOpA;
    oneapi::mkl::transpose iOpB;
    if ( OpA == GridBLAS_OP_N ) iOpA = oneapi::mkl::transpose::N;
    if ( OpA == GridBLAS_OP_T ) iOpA = oneapi::mkl::transpose::T;
    if ( OpA == GridBLAS_OP_C ) iOpA = oneapi::mkl::transpose::C;
    if ( OpB == GridBLAS_OP_N ) iOpB = oneapi::mkl::transpose::N;
    if ( OpB == GridBLAS_OP_T ) iOpB = oneapi::mkl::transpose::T;
    if ( OpB == GridBLAS_OP_C ) iOpB = oneapi::mkl::transpose::C;

    oneapi::mkl::blas::column_major::gemm_batch(*gridblasHandle,
                                                &iOpA, &iOpB,
                                                &m64,&n64,&k64,
                                                (ComplexD *) &alpha_p[0],
                                                (const ComplexD **)&Amk[0], (const int64_t *)&lda64,
                                                (const ComplexD **)&Bkn[0], (const int64_t *)&ldb64,
                                                (ComplexD *) &beta_p[0],
                                                (ComplexD **)&Cmn[0], (const int64_t *)&ldc64,
                                                (int64_t)1,&batchCount64,std::vector<sycl::event>());
    synchronise();
#endif
#if 0
    // This code was used to check the mat mul on Sunspot/OneMKL
    std::cerr << " Called SYCL batched ZGEMM OpA "<< OpA << " OpB "<<OpB<<std::endl;
    std::vector<ComplexD> A(m*k); // pointer list to matrices
    std::vector<ComplexD> B(k*n);
    std::vector<ComplexD> C(m*n);
    //    int sda = lda*k;
    //    int sdb = ldb*k;
    //    int sdc = ldc*n;
    std::cerr << " Checking the GEMM results "<<std::endl;
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
    // Need a default/reference implementation; use Eigen
    if ( (OpA == GridBLAS_OP_N ) && (OpB == GridBLAS_OP_N) ) {
      thread_for (p, batchCount, {
          Eigen::Map<Eigen::MatrixXcd> eAmk(Amk[p],m,k);
          Eigen::Map<Eigen::MatrixXcd> eBkn(Bkn[p],k,n);
          Eigen::Map<Eigen::MatrixXcd> eCmn(Cmn[p],m,n);
          eCmn = beta * eCmn + alpha * eAmk * eBkn ;
        });
    } else if ( (OpA == GridBLAS_OP_C ) && (OpB == GridBLAS_OP_N) ) {
      thread_for (p, batchCount, {
          Eigen::Map<Eigen::MatrixXcd> eAmk(Amk[p],k,m);
          Eigen::Map<Eigen::MatrixXcd> eBkn(Bkn[p],k,n);
          Eigen::Map<Eigen::MatrixXcd> eCmn(Cmn[p],m,n);
          eCmn = beta * eCmn + alpha * eAmk.adjoint() * eBkn ;
        });
    } else if ( (OpA == GridBLAS_OP_N ) && (OpB == GridBLAS_OP_C) ) {
      thread_for (p, batchCount, {
          Eigen::Map<Eigen::MatrixXcd> eAmk(Amk[p],m,k);
          Eigen::Map<Eigen::MatrixXcd> eBkn(Bkn[p],n,k);
          Eigen::Map<Eigen::MatrixXcd> eCmn(Cmn[p],m,n);
          eCmn = beta * eCmn + alpha * eAmk * eBkn.adjoint() ;
        });
    } else if ( (OpA == GridBLAS_OP_C ) && (OpB == GridBLAS_OP_C) ) {
      thread_for (p, batchCount, {
          Eigen::Map<Eigen::MatrixXcd> eAmk(Amk[p],k,m);
          Eigen::Map<Eigen::MatrixXcd> eBkn(Bkn[p],n,k);
          Eigen::Map<Eigen::MatrixXcd> eCmn(Cmn[p],m,n);
          eCmn = beta * eCmn + alpha * eAmk.adjoint() * eBkn.adjoint() ;
        } );
    } else {
      assert(0);
    }
#endif
    RealD t1=usecond();
    RealD flops = 8.0*m*n*k*batchCount;
    RealD bytes = 1.0*sizeof(ComplexD)*(m*k+k*n+m*n)*batchCount;
    //    std::cout << "ZgemmBatched " << 1.e-3*flops/(t1-t0) << " GF/s" << std::endl;
  }
  void gemmBatched(GridBLASOperation_t OpA,
                   GridBLASOperation_t OpB,
                   int m,int n, int k,
                   ComplexF alpha,
                   deviceVector<ComplexF*> &Amk,  // pointer list to matrices
                   deviceVector<ComplexF*> &Bkn,
                   ComplexF beta,
                   deviceVector<ComplexF*> &Cmn)
  {
    RealD t2=usecond();
    int32_t batchCount = Amk.size();

    assert(OpA!=GridBLAS_OP_T); // Complex case expect no transpose
    assert(OpB!=GridBLAS_OP_T);

    int lda = m; // m x k column major
    int ldb = k; // k x n column major
    int ldc = m; // m x n column major
    if(OpA!=GridBLAS_OP_N) lda = k;
    if(OpB!=GridBLAS_OP_N) ldb = n;

    static deviceVector<ComplexF> alpha_p(1);
    static deviceVector<ComplexF> beta_p(1);
    // can prestore the 1 and the zero on device
    acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(ComplexF));
    acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(ComplexF));
    RealD t0=usecond();

    assert(Bkn.size()==batchCount);
    assert(Cmn.size()==batchCount);
#ifdef GRID_HIP
    hipblasOperation_t hOpA;
    hipblasOperation_t hOpB;
    if ( OpA == GridBLAS_OP_N ) hOpA = HIPBLAS_OP_N;
    if ( OpA == GridBLAS_OP_T ) hOpA = HIPBLAS_OP_T;
    if ( OpA == GridBLAS_OP_C ) hOpA = HIPBLAS_OP_C;
    if ( OpB == GridBLAS_OP_N ) hOpB = HIPBLAS_OP_N;
    if ( OpB == GridBLAS_OP_T ) hOpB = HIPBLAS_OP_T;
    if ( OpB == GridBLAS_OP_C ) hOpB = HIPBLAS_OP_C;
    auto err = hipblasCgemmBatched(gridblasHandle,
                                   hOpA, hOpB,
                                   m,n,k,
                                   (hipblasComplex *) &alpha_p[0],
                                   (hipblasComplex **)&Amk[0], lda,
                                   (hipblasComplex **)&Bkn[0], ldb,
                                   (hipblasComplex *) &beta_p[0],
                                   (hipblasComplex **)&Cmn[0], ldc,
                                   batchCount);
    assert(err==HIPBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_CUDA
    cublasOperation_t hOpA;
    cublasOperation_t hOpB;
    if ( OpA == GridBLAS_OP_N ) hOpA = CUBLAS_OP_N;
    if ( OpA == GridBLAS_OP_T ) hOpA = CUBLAS_OP_T;
    if ( OpA == GridBLAS_OP_C ) hOpA = CUBLAS_OP_C;
    if ( OpB == GridBLAS_OP_N ) hOpB = CUBLAS_OP_N;
    if ( OpB == GridBLAS_OP_T ) hOpB = CUBLAS_OP_T;
    if ( OpB == GridBLAS_OP_C ) hOpB = CUBLAS_OP_C;
    auto err = cublasCgemmBatched(gridblasHandle,
                                  hOpA, hOpB,
                                  m,n,k,
                                  (cuComplex *) &alpha_p[0],
                                  (cuComplex **)&Amk[0], lda,
                                  (cuComplex **)&Bkn[0], ldb,
                                  (cuComplex *) &beta_p[0],
                                  (cuComplex **)&Cmn[0], ldc,
                                  batchCount);
    assert(err==CUBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_SYCL
    int64_t m64=m;
    int64_t n64=n;
    int64_t k64=k;
    int64_t lda64=lda;
    int64_t ldb64=ldb;
    int64_t ldc64=ldc;
    int64_t batchCount64=batchCount;

    oneapi::mkl::transpose iOpA;
    oneapi::mkl::transpose iOpB;
    if ( OpA == GridBLAS_OP_N ) iOpA = oneapi::mkl::transpose::N;
    if ( OpA == GridBLAS_OP_T ) iOpA = oneapi::mkl::transpose::T;
    if ( OpA == GridBLAS_OP_C ) iOpA = oneapi::mkl::transpose::C;
    if ( OpB == GridBLAS_OP_N ) iOpB = oneapi::mkl::transpose::N;
    if ( OpB == GridBLAS_OP_T ) iOpB = oneapi::mkl::transpose::T;
    if ( OpB == GridBLAS_OP_C ) iOpB = oneapi::mkl::transpose::C;

    oneapi::mkl::blas::column_major::gemm_batch(*gridblasHandle,
                                                &iOpA, &iOpB,
                                                &m64,&n64,&k64,
                                                (ComplexF *) &alpha_p[0],
                                                (const ComplexF **)&Amk[0], (const int64_t *)&lda64,
                                                (const ComplexF **)&Bkn[0], (const int64_t *)&ldb64,
                                                (ComplexF *) &beta_p[0],
                                                (ComplexF **)&Cmn[0], (const int64_t *)&ldc64,
                                                (int64_t)1,&batchCount64,std::vector<sycl::event>());
    synchronise();
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
    // Need a default/reference implementation; use Eigen
    if ( (OpA == GridBLAS_OP_N ) && (OpB == GridBLAS_OP_N) ) {
      thread_for (p, batchCount, {
          Eigen::Map<Eigen::MatrixXcf> eAmk(Amk[p],m,k);
          Eigen::Map<Eigen::MatrixXcf> eBkn(Bkn[p],k,n);
          Eigen::Map<Eigen::MatrixXcf> eCmn(Cmn[p],m,n);
          eCmn = beta * eCmn + alpha * eAmk * eBkn ;
        });
    } else if ( (OpA == GridBLAS_OP_C ) && (OpB == GridBLAS_OP_N) ) {
      thread_for (p, batchCount, {
          Eigen::Map<Eigen::MatrixXcf> eAmk(Amk[p],k,m);
          Eigen::Map<Eigen::MatrixXcf> eBkn(Bkn[p],k,n);
          Eigen::Map<Eigen::MatrixXcf> eCmn(Cmn[p],m,n);
          eCmn = beta * eCmn + alpha * eAmk.adjoint() * eBkn ;
        });
    }
    else if ( (OpA == GridBLAS_OP_N ) && (OpB == GridBLAS_OP_C) ) {
      thread_for (p, batchCount, {
          Eigen::Map<Eigen::MatrixXcf> eAmk(Amk[p],m,k);
          Eigen::Map<Eigen::MatrixXcf> eBkn(Bkn[p],n,k);
          Eigen::Map<Eigen::MatrixXcf> eCmn(Cmn[p],m,n);
          eCmn = beta * eCmn + alpha * eAmk * eBkn.adjoint() ;
        });
    } else if ( (OpA == GridBLAS_OP_C ) && (OpB == GridBLAS_OP_C) ) {
      thread_for (p, batchCount, {
          Eigen::Map<Eigen::MatrixXcf> eAmk(Amk[p],k,m);
          Eigen::Map<Eigen::MatrixXcf> eBkn(Bkn[p],n,k);
          Eigen::Map<Eigen::MatrixXcf> eCmn(Cmn[p],m,n);
          eCmn = beta * eCmn + alpha * eAmk.adjoint() * eBkn.adjoint() ;
        } );
    } else {
      assert(0);
    }
#endif
    RealD t1=usecond();
    RealD flops = 8.0*m*n*k*batchCount;
    RealD bytes = 1.0*sizeof(ComplexF)*(m*k+k*n+m*n)*batchCount;
  }

  template<class CComplex>
  double benchmark(int M, int N, int K, int BATCH)
  {
    int32_t N_A = M*K*BATCH;
    int32_t N_B = K*N*BATCH;
    int32_t N_C = M*N*BATCH;

    deviceVector<CComplex> A(N_A); acceleratorMemSet(&A[0],0,N_A*sizeof(CComplex));
    deviceVector<CComplex> B(N_B); acceleratorMemSet(&B[0],0,N_B*sizeof(CComplex));
    deviceVector<CComplex> C(N_C); acceleratorMemSet(&C[0],0,N_C*sizeof(CComplex));

    CComplex alpha(1.0);
    CComplex beta (1.0);
    RealD flops = 8.0*M*N*K*BATCH;
    int ncall=10;

    deviceVector<CComplex *> As(BATCH);
    deviceVector<CComplex *> Bs(BATCH);
    deviceVector<CComplex *> Cs(BATCH);
    for(int b = 0 ; b < BATCH;b++) {
      CComplex *ptr;
      ptr = &A[b*M*K];  acceleratorPut(As[b],ptr);
      ptr = &B[b*K*N];  acceleratorPut(Bs[b],ptr);
      ptr = &C[b*M*N];  acceleratorPut(Cs[b],ptr);
    }

    // warm up
    gemmBatched(M,N,K,
                alpha,
                As, // m x k
                Bs, // k x n
                beta,
                Cs);
    synchronise();

    RealD t0 = usecond();
    for(int i=0;i<ncall;i++){
      gemmBatched(M,N,K,
                  alpha,
                  As, // m x k
                  Bs, // k x n
                  beta,
                  Cs);
    }
    synchronise();
    RealD t1 = usecond();
    RealD bytes = 1.0*sizeof(CComplex)*(M*K+K*N+M*N)*BATCH;
    // GF/s over ncall batched calls; the return-value convention is assumed
    return flops*ncall/(t1-t0)/1.e3;
  }

  template<class CComplex>
  double benchmark(int M, int N, int K)
  {
    int32_t N_A = M*K;
    int32_t N_B = K*N;
    int32_t N_C = M*N;

    deviceVector<CComplex> A(N_A); acceleratorMemSet(&A[0],0,N_A*sizeof(CComplex));
    deviceVector<CComplex> B(N_B); acceleratorMemSet(&B[0],0,N_B*sizeof(CComplex));
    deviceVector<CComplex> C(N_C); acceleratorMemSet(&C[0],0,N_C*sizeof(CComplex));

    CComplex alpha(1.0);
    CComplex beta (1.0);
    RealD flops = 8.0*M*N*K;
    int ncall=10;

    // warm up
    gemm(GridBLAS_OP_C,GridBLAS_OP_N,
         M,N,K,
         alpha,
         &A[0], // m x k
         &B[0], // k x n
         beta,
         &C[0]);
    synchronise();

    RealD t0 = usecond();
    for(int i=0;i<ncall;i++){
      gemm(GridBLAS_OP_C,GridBLAS_OP_N,
           M,N,K,
           alpha,
           &A[0], // m x k
           &B[0], // k x n
           beta,
           &C[0]);
    }
    synchronise();
    RealD t1 = usecond();
    RealD bytes = 1.0*sizeof(CComplex)*(M*K+K*N+M*N);
    // GF/s over ncall calls; the return-value convention is assumed
    return flops*ncall/(t1-t0)/1.e3;
  }
};

gridblasHandle_t GridBLAS::gridblasHandle;
int              GridBLAS::gridblasInit;

FILE * FP;

template<class CComplex>
static void BLAS(void)
{
  //int nbasis, int nrhs, int coarseVol
  int basis[] = { 16,32,64 };
  int rhs[]   = { 8,12,16 };
  int vol = 8*8*8*8;
  int blk = 4*4*4*4;

  GridBLAS blas;

  int fpbits = sizeof(CComplex)*4;
  // NB: the banner text and the M,N,K assignments in the loops below are assumed;
  // only the per-result print statements survive from the original.
  std::cout<< "=================================================================================="<<std::endl;
  std::cout<< "= fp"<<fpbits<<" batched GEMM, BATCH = coarse volume"<<std::endl;
  std::cout<< "=================================================================================="<<std::endl;
  fprintf(FP,"GEMM\n\n M, N, K, BATCH, GF/s per rank\n");
  for(int b=0;b<3;b++){
  for(int r=0;r<3;r++){
    int M=basis[b];   // assumed
    int N=rhs[r];     // assumed
    int K=basis[b];   // assumed
    int BATCH=vol;    // assumed
    double p=blas.benchmark<CComplex>(M,N,K,BATCH);
    fprintf(FP,"%d, %d, %d, %d, %f\n", M, N, K, BATCH, p);
    std::cout<< M<<"\t\t"<<N<<"\t\t"<<K<<"\t\t"<<BATCH<<"\t\t"<<p<<std::endl;
  }}

  std::cout<< "=================================================================================="<<std::endl;
  std::cout<< "= fp"<<fpbits<<" batched GEMM, block project"<<std::endl;
  std::cout<< "=================================================================================="<<std::endl;
  for(int b=0;b<3;b++){
  for(int r=0;r<3;r++){
    int M=basis[b];   // assumed
    int N=rhs[r];     // assumed
    int K=blk;        // assumed
    int BATCH=vol;    // assumed
    double p=blas.benchmark<CComplex>(M,N,K,BATCH);
    fprintf(FP,"%d, %d, %d, %d, %f\n", M, N, K, BATCH, p);
    std::cout<< M<<"\t\t"<<N<<"\t\t"<<K<<"\t\t"<<BATCH<<"\t\t"<<p<<std::endl;
  }}

  std::cout<< "=================================================================================="<<std::endl;
  std::cout<< "= fp"<<fpbits<<" batched GEMM, block promote"<<std::endl;
  std::cout<< "=================================================================================="<<std::endl;
  for(int b=0;b<3;b++){
  for(int r=0;r<3;r++){
    int M=blk;        // assumed
    int N=rhs[r];     // assumed
    int K=basis[b];   // assumed
    int BATCH=vol;    // assumed
    double p=blas.benchmark<CComplex>(M,N,K,BATCH);
    fprintf(FP,"%d, %d, %d, %d, %f\n", M, N, K, BATCH, p);
    std::cout<< M<<"\t\t"<<N<<"\t\t"<<K<<"\t\t"<<BATCH<<"\t\t"<<p<<std::endl;
  }}

  std::cout<< "=================================================================================="<<std::endl;
  std::cout<< "= fp"<<fpbits<<" single GEMM (A^dag B), large K"<<std::endl;
  std::cout<< "=================================================================================="<<std::endl;
  std::vector<int> ks({4*1024*1024, 2*1024*1024, 1024*1024, 256*1024, 1024 });
  for( int kk=0;kk<(int)ks.size();kk++ ) {
    int M=basis[2];   // assumed
    int N=rhs[2];     // assumed
    int K=ks[kk];
    double p=blas.benchmark<CComplex>(M,N,K);
    fprintf(FP,"%d, %d, %d, %d, %f\n", M, N, K, 1, p);
    std::cout<< M<<"\t\t"<<N<<"\t\t"<<K<<"\t\t"<<1<<"\t\t"<<p<<std::endl;
  }
  std::cout<< "=================================================================================="<<std::endl;
}

int main(int argc, char **argv)
{
  acceleratorInit();
  FP = fopen("blas.csv","w"); // output file name assumed; results are written in CSV form
  BLAS<ComplexD>();
  BLAS<ComplexF>();
  fclose(FP);
}
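// Illustrative sketch, not part of the benchmark and compile-disabled: a minimal direct call of
// GridBLAS::gemm computing C = A^dagger * B for one small fp64 matrix pair, assuming a GPU backend
// (GRID_CUDA, GRID_HIP or GRID_SYCL) so the device allocator and copy helpers are available.
// Dimensions, values and the function name are arbitrary examples.
#if 0
void exampleSingleGemm(void)
{
  const int M=4, N=4, K=4;
  GridBLAS blas;
  deviceVector<ComplexD> A(K*M), B(K*N), C(M*N);           // column-major device buffers
  std::vector<ComplexD>  hA(K*M,ComplexD(1.0,0.0));
  std::vector<ComplexD>  hB(K*N,ComplexD(0.0,1.0));
  std::vector<ComplexD>  hC(M*N,ComplexD(0.0,0.0));
  acceleratorCopyToDevice(&hA[0],&A[0],K*M*sizeof(ComplexD));
  acceleratorCopyToDevice(&hB[0],&B[0],K*N*sizeof(ComplexD));
  acceleratorCopyToDevice(&hC[0],&C[0],M*N*sizeof(ComplexD));
  // C = 1.0 * A^dagger * B + 0.0 * C ; with OP_C the A buffer is K x M, so lda = K inside gemm
  blas.gemm(GridBLAS_OP_C,GridBLAS_OP_N, M,N,K,
            ComplexD(1.0,0.0), &A[0], &B[0], ComplexD(0.0,0.0), &C[0]);
  blas.synchronise();
  acceleratorCopyFromDevice(&C[0],&hC[0],M*N*sizeof(ComplexD));
  printf("C[0] = (%f,%f)\n",hC[0].real(),hC[0].imag());
}
#endif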