diff --git a/Grid/communicator/SharedMemoryMPI.cc b/Grid/communicator/SharedMemoryMPI.cc
index 89a9d316..64a86c4b 100644
--- a/Grid/communicator/SharedMemoryMPI.cc
+++ b/Grid/communicator/SharedMemoryMPI.cc
@@ -604,8 +604,8 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 #ifdef GRID_SYCL_LEVEL_ZERO_IPC
   typedef struct { int fd; pid_t pid ; ze_ipc_mem_handle_t ze; } clone_mem_t;
 
-  auto zeDevice  = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_device());
-  auto zeContext = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_context());
+  auto zeDevice  = cl::sycl::get_native<cl::sycl::backend::ext_oneapi_level_zero>(theGridAccelerator->get_device());
+  auto zeContext = cl::sycl::get_native<cl::sycl::backend::ext_oneapi_level_zero>(theGridAccelerator->get_context());
 
   ze_ipc_mem_handle_t ihandle;
   clone_mem_t handle;
diff --git a/Grid/tensors/Tensor_Ta.h b/Grid/tensors/Tensor_Ta.h
index 90e57b2b..7deec77a 100644
--- a/Grid/tensors/Tensor_Ta.h
+++ b/Grid/tensors/Tensor_Ta.h
@@ -90,10 +90,12 @@ template<class vtype,int N> accelerator_inline iVector<vtype,N> ProjectOnGroup(c
 template<class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
 accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
 {
+  typedef typename iMatrix<vtype,N>::scalar_type scalar;
   // need a check for the group type?
   iMatrix<vtype,N> ret(arg);
   vtype nrm;
   vtype inner;
+  scalar one(1.0);
   for(int c1=0;c1<N;c1++){
@@ ... @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
       inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]);
     nrm = sqrt(inner);
-    nrm = 1.0/nrm;
+    nrm = one/nrm;
     for(int c2=0;c2<N;c2++)
@@ ... @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
       inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]);
     nrm = sqrt(inner);
-    nrm = 1.0/nrm;
+    nrm = one/nrm;
     for(int c2=0;c2<N;c2++)
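
The Tensor_Ta.h hunks swap the bare double literal in nrm = 1.0/nrm; for a "one" built from the tensor's own scalar_type. The patch carries no commit message, so the motivation given here is an inference: forming the reciprocal from a typed value keeps the division entirely inside the tensor's scalar type rather than mixing in a raw double, which for some scalar types does not resolve at all and which can drag unwanted double-precision arithmetic into accelerator code. The self-contained C++ sketch below illustrates the pattern; iMatrixSketch, normalise_rows and its scalar_type typedef are hypothetical stand-ins, not Grid's real iMatrix/GridTypeMapper machinery.

// Standalone sketch (not Grid code) of the "typed one" pattern from the patch.
#include <complex>
#include <iostream>

template<class vtype, int N>
struct iMatrixSketch {
  typedef vtype scalar_type;        // simplification: Grid derives this via its type traits
  vtype _internal[N][N];
};

template<class vtype, int N>
void normalise_rows(iMatrixSketch<vtype, N> &m) {
  typedef typename iMatrixSketch<vtype, N>::scalar_type scalar;
  scalar one(1.0);                  // typed "one", as in the Tensor_Ta.h hunks
  for (int c1 = 0; c1 < N; c1++) {
    vtype inner(0.0);
    for (int c2 = 0; c2 < N; c2++)
      inner += std::conj(m._internal[c1][c2]) * m._internal[c1][c2];
    vtype nrm = sqrt(inner);
    // With vtype = std::complex<float>, "nrm = 1.0/nrm;" would not even compile
    // (double / complex<float> has no matching operator/); dividing by the typed
    // "one" keeps the whole expression in the tensor's scalar type.
    nrm = one / nrm;
    for (int c2 = 0; c2 < N; c2++)
      m._internal[c1][c2] *= nrm;
  }
}

int main() {
  iMatrixSketch<std::complex<float>, 2> m{};   // all elements start at (0,0)
  m._internal[0][0] = {2.f, 0.f};
  m._internal[1][1] = {3.f, 0.f};
  normalise_rows(m);                           // each nonzero row scaled to unit norm
  std::cout << m._internal[0][0] << " " << m._internal[1][1] << "\n";  // (1,0) (1,0)
}

Compiled as ordinary C++, this prints (1,0) (1,0); switching one / nrm back to 1.0 / nrm makes the std::complex<float> instantiation fail to build, which is the kind of mixed-type arithmetic the typed constant avoids.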