Merge pull request #139 from thomasgibson/updated-hip-kernels

Updated hip kernels
Tom Deakin 2023-05-02 15:18:08 +01:00 committed by GitHub
commit ebb1176a20
2 changed files with 50 additions and 32 deletions

HIPStream.cpp

@@ -9,7 +9,7 @@
 #include "hip/hip_runtime.h"
 #define TBSIZE 1024
-#define DOT_NUM_BLOCKS 256
 void check_error(void)
 {
@@ -47,9 +47,15 @@ HIPStream<T>::HIPStream(const int ARRAY_SIZE, const int device_index)
   std::cout << "Driver: " << getDeviceDriver(device_index) << std::endl;
   array_size = ARRAY_SIZE;
+  // Round dot_num_blocks up to next multiple of (TBSIZE * dot_elements_per_lane)
+  dot_num_blocks = (array_size + (TBSIZE * dot_elements_per_lane - 1)) / (TBSIZE * dot_elements_per_lane);
-  // Allocate the host array for partial sums for dot kernels
-  sums = (T*)malloc(sizeof(T) * DOT_NUM_BLOCKS);
+  // Allocate the host array for partial sums for dot kernels using hipHostMalloc.
+  // This creates an array on the host which is visible to the device. However, it requires
+  // synchronization (e.g. hipDeviceSynchronize) for the result to be available on the host
+  // after it has been passed through to a kernel.
+  hipHostMalloc(&sums, sizeof(T) * dot_num_blocks, hipHostMallocNonCoherent);
+  check_error();
   // Check buffers fit on the device
   hipDeviceProp_t props;
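The new dot_num_blocks line is the standard integer round-up idiom: (n + d - 1) / d equals ceil(n / d) for positive integers, so every element is covered when each of TBSIZE lanes reads dot_elements_per_lane elements. A standalone sketch of the arithmetic, with illustrative values not taken from the PR:

// Round-up (ceiling) division as used for dot_num_blocks.
// Assumed values: TBSIZE = 1024 and dot_elements_per_lane = 2 (the FP64
// case when DOT_READ_DWORDS_PER_LANE = 4), so one block covers 2048 elements.
#include <cstdio>

int main()
{
  const int TBSIZE = 1024;
  const int dot_elements_per_lane = 2;
  const int per_block = TBSIZE * dot_elements_per_lane;
  for (int array_size : {2048, 2049, 33554432})
  {
    const int dot_num_blocks = (array_size + (per_block - 1)) / per_block;
    std::printf("array_size=%8d -> dot_num_blocks=%d\n", array_size, dot_num_blocks);
  }
  return 0;
}

This prints 1, 2, and 16384 blocks respectively: any remainder bumps the count up by one rather than leaving a tail of elements unread.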
@@ -64,15 +70,14 @@ HIPStream<T>::HIPStream(const int ARRAY_SIZE, const int device_index)
   check_error();
   hipMalloc(&d_c, ARRAY_SIZE*sizeof(T));
   check_error();
-  hipMalloc(&d_sum, DOT_NUM_BLOCKS*sizeof(T));
-  check_error();
 }
 template <class T>
 HIPStream<T>::~HIPStream()
 {
-  free(sums);
+  hipHostFree(sums);
+  check_error();
   hipFree(d_a);
   check_error();
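Since sums now comes from hipHostMalloc, the destructor must release it with hipHostFree rather than free. A minimal sketch of the allocation/free pairing, assuming a float buffer and eliding per-call error checks:

#include <hip/hip_runtime.h>

int main()
{
  float *sums = nullptr;
  // Pinned host memory, visible to the device; non-coherent means the host
  // must synchronize (e.g. hipDeviceSynchronize) before reading device writes.
  hipHostMalloc(&sums, 256 * sizeof(float), hipHostMallocNonCoherent);
  // ... launch a kernel that writes partial results into sums ...
  hipDeviceSynchronize();
  hipHostFree(sums);  // pairs with hipHostMalloc; plain free() would be wrong
  return 0;
}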
@@ -80,15 +85,13 @@ HIPStream<T>::~HIPStream()
   check_error();
   hipFree(d_c);
   check_error();
-  hipFree(d_sum);
-  check_error();
 }
 template <typename T>
 __global__ void init_kernel(T * a, T * b, T * c, T initA, T initB, T initC)
 {
-  const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
+  const size_t i = blockDim.x * blockIdx.x + threadIdx.x;
   a[i] = initA;
   b[i] = initB;
   c[i] = initC;
@@ -97,7 +100,7 @@ __global__ void init_kernel(T * a, T * b, T * c, T initA, T initB, T initC)
 template <class T>
 void HIPStream<T>::init_arrays(T initA, T initB, T initC)
 {
-  hipLaunchKernelGGL(HIP_KERNEL_NAME(init_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_a, d_b, d_c, initA, initB, initC);
+  init_kernel<T><<<dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0>>>(d_a, d_b, d_c, initA, initB, initC);
   check_error();
   hipDeviceSynchronize();
   check_error();
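The launch-syntax change here (and in every kernel below) swaps the hipLaunchKernelGGL macro for the CUDA-style triple-chevron form that hipcc also accepts; both spell out the same grid, block, shared-memory, and stream arguments. A small self-contained sketch with a hypothetical scale kernel:

#include <hip/hip_runtime.h>

__global__ void scale(float *x, float s)
{
  const size_t i = blockDim.x * blockIdx.x + threadIdx.x;
  x[i] = s * x[i];
}

int main()
{
  const int N = 1024, TB = 256;
  float *d_x;
  hipMalloc(&d_x, N * sizeof(float));

  // Macro form (pre-PR): grid, block, shared-mem bytes, stream, kernel args.
  hipLaunchKernelGGL(scale, dim3(N / TB), dim3(TB), 0, 0, d_x, 2.0f);

  // Triple-chevron form (post-PR): the identical launch configuration.
  scale<<<dim3(N / TB), dim3(TB), 0, 0>>>(d_x, 2.0f);

  hipDeviceSynchronize();
  hipFree(d_x);
  return 0;
}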
@@ -115,18 +118,17 @@ void HIPStream<T>::read_arrays(std::vector<T>& a, std::vector<T>& b, std::vector
   check_error();
 }
 template <typename T>
 __global__ void copy_kernel(const T * a, T * c)
 {
-  const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
+  const size_t i = threadIdx.x + blockIdx.x * blockDim.x;
   c[i] = a[i];
 }
 template <class T>
 void HIPStream<T>::copy()
 {
-  hipLaunchKernelGGL(HIP_KERNEL_NAME(copy_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_a, d_c);
+  copy_kernel<T><<<dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0>>>(d_a, d_c);
   check_error();
   hipDeviceSynchronize();
   check_error();
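The index type change from int to size_t in these kernels matters for large arrays: blockDim.x and blockIdx.x are unsigned 32-bit values, so once the flat index reaches 2^31 it no longer fits in int and the stored index goes negative. A host-side illustration with assumed launch values:

#include <cstdio>

int main()
{
  const unsigned int blockDim_x = 1024;
  const unsigned int blockIdx_x = 2097152;  // block 2^21 of a very large grid

  // 1024 * 2097152 = 2^31: representable as unsigned, but not as int.
  const int    bad = (int)(blockDim_x * blockIdx_x);   // implementation-defined, negative
  const size_t ok  = (size_t)blockDim_x * blockIdx_x;  // 2147483648, as intended

  std::printf("int index: %d, size_t index: %zu\n", bad, ok);
  return 0;
}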
@@ -136,14 +138,14 @@ template <typename T>
 __global__ void mul_kernel(T * b, const T * c)
 {
   const T scalar = startScalar;
-  const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
+  const size_t i = threadIdx.x + blockIdx.x * blockDim.x;
   b[i] = scalar * c[i];
 }
 template <class T>
 void HIPStream<T>::mul()
 {
-  hipLaunchKernelGGL(HIP_KERNEL_NAME(mul_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_b, d_c);
+  mul_kernel<T><<<dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0>>>(d_b, d_c);
   check_error();
   hipDeviceSynchronize();
   check_error();
@@ -152,14 +154,14 @@ void HIPStream<T>::mul()
 template <typename T>
 __global__ void add_kernel(const T * a, const T * b, T * c)
 {
-  const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
+  const size_t i = threadIdx.x + blockIdx.x * blockDim.x;
   c[i] = a[i] + b[i];
 }
 template <class T>
 void HIPStream<T>::add()
 {
-  hipLaunchKernelGGL(HIP_KERNEL_NAME(add_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_a, d_b, d_c);
+  add_kernel<T><<<dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0>>>(d_a, d_b, d_c);
   check_error();
   hipDeviceSynchronize();
   check_error();
@@ -169,14 +171,14 @@ template <typename T>
 __global__ void triad_kernel(T * a, const T * b, const T * c)
 {
   const T scalar = startScalar;
-  const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
+  const size_t i = threadIdx.x + blockIdx.x * blockDim.x;
   a[i] = b[i] + scalar * c[i];
 }
 template <class T>
 void HIPStream<T>::triad()
 {
-  hipLaunchKernelGGL(HIP_KERNEL_NAME(triad_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_a, d_b, d_c);
+  triad_kernel<T><<<dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0>>>(d_a, d_b, d_c);
   check_error();
   hipDeviceSynchronize();
   check_error();
@@ -186,32 +188,32 @@ template <typename T>
 __global__ void nstream_kernel(T * a, const T * b, const T * c)
 {
   const T scalar = startScalar;
-  const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
+  const size_t i = threadIdx.x + blockIdx.x * blockDim.x;
   a[i] += b[i] + scalar * c[i];
 }
 template <class T>
 void HIPStream<T>::nstream()
 {
-  hipLaunchKernelGGL(HIP_KERNEL_NAME(nstream_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_a, d_b, d_c);
+  nstream_kernel<T><<<dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0>>>(d_a, d_b, d_c);
   check_error();
   hipDeviceSynchronize();
   check_error();
 }
-template <class T>
+template <typename T>
 __global__ void dot_kernel(const T * a, const T * b, T * sum, int array_size)
 {
   __shared__ T tb_sum[TBSIZE];
-  int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
-  const size_t local_i = hipThreadIdx_x;
+  const size_t local_i = threadIdx.x;
+  size_t i = blockDim.x * blockIdx.x + local_i;
   tb_sum[local_i] = 0.0;
-  for (; i < array_size; i += hipBlockDim_x*hipGridDim_x)
+  for (; i < array_size; i += blockDim.x*gridDim.x)
     tb_sum[local_i] += a[i] * b[i];
-  for (int offset = hipBlockDim_x / 2; offset > 0; offset /= 2)
+  for (size_t offset = blockDim.x / 2; offset > 0; offset /= 2)
   {
     __syncthreads();
     if (local_i < offset)
@@ -221,20 +223,19 @@ __global__ void dot_kernel(const T * a, const T * b, T * sum, int array_size)
   }
   if (local_i == 0)
-    sum[hipBlockIdx_x] = tb_sum[local_i];
+    sum[blockIdx.x] = tb_sum[local_i];
 }
 template <class T>
 T HIPStream<T>::dot()
 {
-  hipLaunchKernelGGL(HIP_KERNEL_NAME(dot_kernel<T>), dim3(DOT_NUM_BLOCKS), dim3(TBSIZE), 0, 0, d_a, d_b, d_sum, array_size);
+  dot_kernel<T><<<dim3(dot_num_blocks), dim3(TBSIZE), 0, 0>>>(d_a, d_b, sums, array_size);
   check_error();
-  hipMemcpy(sums, d_sum, DOT_NUM_BLOCKS*sizeof(T), hipMemcpyDeviceToHost);
+  hipDeviceSynchronize();
   check_error();
   T sum = 0.0;
-  for (int i = 0; i < DOT_NUM_BLOCKS; i++)
+  for (int i = 0; i < dot_num_blocks; i++)
     sum += sums[i];
   return sum;
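Putting the pieces together: the kernel accumulates a grid-stride partial sum per lane into shared memory, tree-reduces each block down to one entry of sums, and the host finishes the reduction once hipDeviceSynchronize makes the non-coherent buffer safe to read. A plain-CPU model of that two-level structure (sizes are illustrative, and the in-block tree reduction is collapsed into the per-block loop):

#include <cstdio>
#include <vector>

int main()
{
  const int n = 10000, num_blocks = 4, block_size = 256;
  std::vector<double> a(n, 1.0), b(n, 2.0), sums(num_blocks, 0.0);

  // Grid-stride partial sums: lane (blk, t) handles i, i + stride, ...
  const int stride = num_blocks * block_size;
  for (int blk = 0; blk < num_blocks; blk++)
    for (int t = 0; t < block_size; t++)
      for (int i = blk * block_size + t; i < n; i += stride)
        sums[blk] += a[i] * b[i];

  // Final host-side reduction, mirroring HIPStream<T>::dot().
  double sum = 0.0;
  for (int blk = 0; blk < num_blocks; blk++)
    sum += sums[blk];

  std::printf("dot = %.1f (expected %.1f)\n", sum, 2.0 * n);
  return 0;
}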

HIPStream.h

@@ -14,13 +14,31 @@
 #include "Stream.h"
 #define IMPLEMENTATION_STRING "HIP"
+#define DOT_READ_DWORDS_PER_LANE 4
 template <class T>
 class HIPStream : public Stream<T>
 {
+  // Make sure that either:
+  //   DOT_READ_DWORDS_PER_LANE is less than sizeof(T), in which case we default to 1 element
+  //   or
+  //   DOT_READ_DWORDS_PER_LANE is divisible by sizeof(T)
+  static_assert((DOT_READ_DWORDS_PER_LANE * sizeof(unsigned int) < sizeof(T)) ||
+                (DOT_READ_DWORDS_PER_LANE * sizeof(unsigned int) % sizeof(T) == 0),
+                "DOT_READ_DWORDS_PER_LANE not divisible by sizeof(element_type)");
+  // Take into account the datatype size
+  // That is, for 4 DOT_READ_DWORDS_PER_LANE, this is 2 FP64 elements
+  // and 4 FP32 elements
+  static constexpr unsigned int dot_elements_per_lane{
+    (DOT_READ_DWORDS_PER_LANE * sizeof(unsigned int)) < sizeof(T) ? 1 : (
+     DOT_READ_DWORDS_PER_LANE * sizeof(unsigned int) / sizeof(T))};
 protected:
   // Size of arrays
   int array_size;
+  int dot_num_blocks;
   // Host array for partial sums for dot kernel
   T *sums;
@@ -29,7 +47,6 @@ class HIPStream : public Stream<T>
   T *d_a;
   T *d_b;
   T *d_c;
-  T *d_sum;
 public:
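For the header's compile-time arithmetic: with DOT_READ_DWORDS_PER_LANE = 4 each lane reads 16 bytes, i.e. 2 FP64 or 4 FP32 elements, and any type wider than 16 bytes falls back to 1 element. A standalone compile-time check of those values (a sketch, not part of the PR):

#define DOT_READ_DWORDS_PER_LANE 4

// Same expression as the class member, lifted out as a variable template.
template <class T>
constexpr unsigned int dot_elements_per_lane =
    (DOT_READ_DWORDS_PER_LANE * sizeof(unsigned int)) < sizeof(T)
        ? 1u
        : DOT_READ_DWORDS_PER_LANE * sizeof(unsigned int) / sizeof(T);

static_assert(dot_elements_per_lane<double> == 2, "16 bytes = 2 FP64 elements");
static_assert(dot_elements_per_lane<float> == 4, "16 bytes = 4 FP32 elements");

int main() { return 0; }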