Add tuned benchmark kernels

Co-authored-by: Nick Curtis <arghdos@users.noreply.github.com>
Thomas Gibson 2022-04-30 21:59:45 -05:00
parent 2c5eee4840
commit a075455ad4
3 changed files with 212 additions and 46 deletions


@@ -9,7 +9,32 @@
#include "hip/hip_runtime.h"
#define TBSIZE 1024
#define DOT_NUM_BLOCKS 256
#ifdef NONTEMPORAL
template<typename T>
__device__ __forceinline__ T load(const T& ref)
{
return __builtin_nontemporal_load(&ref);
}
template<typename T>
__device__ __forceinline__ void store(const T& value, T& ref)
{
__builtin_nontemporal_store(value, &ref);
}
#else
template<typename T>
__device__ __forceinline__ T load(const T& ref)
{
return ref;
}
template<typename T>
__device__ __forceinline__ void store(const T& value, T& ref)
{
ref = value;
}
#endif
void check_error(void)
{
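
For context, __builtin_nontemporal_load and __builtin_nontemporal_store are Clang builtins (exposed by ROCm's hipcc) that mark an access as streaming, i.e. data that will not be re-read soon, so the compiler may bypass the cache hierarchy. A minimal standalone sketch of the same wrapper idea follows; the names stream_load, stream_store and scale are illustrative and not taken from this commit:

#include <hip/hip_runtime.h>

// Streaming load/store wrappers, mirroring the NONTEMPORAL switch in the diff above.
template <typename T>
__device__ __forceinline__ T stream_load(const T& ref)
{
#ifdef NONTEMPORAL
  return __builtin_nontemporal_load(&ref);   // hint: bypass cache, data not reused
#else
  return ref;                                 // plain cached load
#endif
}

template <typename T>
__device__ __forceinline__ void stream_store(const T& value, T& ref)
{
#ifdef NONTEMPORAL
  __builtin_nontemporal_store(value, &ref);   // hint: bypass cache on the write
#else
  ref = value;
#endif
}

__global__ void scale(const double* in, double* out, double s)
{
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  stream_store(s * stream_load(in[i]), out[i]);
}

int main()
{
  const int n = 1 << 20;
  double *in, *out;
  hipMalloc(&in, n * sizeof(double));
  hipMalloc(&out, n * sizeof(double));
  // Data is left uninitialised; this only exercises the wrappers.
  hipLaunchKernelGGL(scale, dim3(n / 256), dim3(256), 0, 0, in, out, 3.0);
  hipDeviceSynchronize();
  hipFree(in);
  hipFree(out);
}
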
@@ -23,15 +48,27 @@ void check_error(void)
template <class T>
HIPStream<T>::HIPStream(const int ARRAY_SIZE, const int device_index)
: array_size{ARRAY_SIZE},
block_count(array_size / (TBSIZE * elements_per_lane * chunks_per_block))
{
// The array size must be divisible by TBSIZE for kernel launches
if (ARRAY_SIZE % TBSIZE != 0)
std::cerr << "Elements per lane: " << elements_per_lane << std::endl;
std::cerr << "Chunks per block: " << chunks_per_block << std::endl;
// The array size must be divisible by total number of elements
// moved per block for kernel launches
if (ARRAY_SIZE % (TBSIZE * elements_per_lane * chunks_per_block) != 0)
{
std::stringstream ss;
ss << "Array size must be a multiple of " << TBSIZE;
ss << "Array size must be a multiple of elements operated on per block ("
<< TBSIZE * elements_per_lane * chunks_per_block
<< ").";
throw std::runtime_error(ss.str());
}
std::cerr << "block count " << block_count << std::endl;
#ifdef NONTEMPORAL
std::cerr << "Using non-temporal memory operations." << std::endl;
#endif
// Set device
int count;
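
To make the new launch geometry concrete: with the AMD-side defaults (TBSIZE = 1024, DWORDS_PER_LANE = 4, CHUNKS_PER_BLOCK = 1) a double occupies two 32-bit dwords, so elements_per_lane = 2 and each block moves 1024 * 2 * 1 = 2048 elements; the array size must be a multiple of that, and block_count is simply array_size divided by it. A small host-only check of the arithmetic, assuming BabelStream's default 2^25-element arrays:

#include <cstdio>

int main()
{
  // AMD-side defaults in this commit: TBSIZE = 1024, DWORDS_PER_LANE = 4,
  // CHUNKS_PER_BLOCK = 1.  A double is two 32-bit dwords, so elements_per_lane = 2.
  const unsigned long tbsize = 1024, elements_per_lane = 2, chunks_per_block = 1;
  const unsigned long per_block = tbsize * elements_per_lane * chunks_per_block;  // 2048

  const unsigned long array_size = 33554432;   // assumed default of 2^25 elements
  if (array_size % per_block != 0)
  {
    std::printf("array size %lu is not a multiple of %lu\n", array_size, per_block);
    return 1;
  }
  std::printf("block_count = %lu\n", array_size / per_block);  // prints 16384
}
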
@@ -49,7 +86,7 @@ HIPStream<T>::HIPStream(const int ARRAY_SIZE, const int device_index)
array_size = ARRAY_SIZE;
// Allocate the host array for partial sums for dot kernels
sums = (T*)malloc(sizeof(T) * DOT_NUM_BLOCKS);
sums = (T*)malloc(block_count*sizeof(T));
// Check buffers fit on the device
hipDeviceProp_t props;
@@ -64,7 +101,7 @@ HIPStream<T>::HIPStream(const int ARRAY_SIZE, const int device_index)
check_error();
hipMalloc(&d_c, ARRAY_SIZE*sizeof(T));
check_error();
hipMalloc(&d_sum, DOT_NUM_BLOCKS*sizeof(T));
hipMalloc(&d_sum, block_count*sizeof(T));
check_error();
}
@@ -115,68 +152,115 @@ void HIPStream<T>::read_arrays(std::vector<T>& a, std::vector<T>& b, std::vector
check_error();
}
template <typename T>
__global__ void copy_kernel(const T * a, T * c)
template <size_t elements_per_lane, size_t chunks_per_block, typename T>
__launch_bounds__(TBSIZE)
__global__
void copy_kernel(const T * __restrict a, T * __restrict c)
{
const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
c[i] = a[i];
const size_t dx = (blockDim.x * gridDim.x) * elements_per_lane;
const size_t gidx = (threadIdx.x + blockIdx.x * blockDim.x) * elements_per_lane;
for (size_t i = 0; i != chunks_per_block; ++i)
{
for (size_t j = 0; j != elements_per_lane; ++j)
{
store(load(a[gidx + i * dx + j]), c[gidx + i * dx + j]);
}
}
}
template <class T>
void HIPStream<T>::copy()
{
hipLaunchKernelGGL(HIP_KERNEL_NAME(copy_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_a, d_c);
hipLaunchKernelGGL(HIP_KERNEL_NAME(copy_kernel<elements_per_lane, chunks_per_block, T>),
dim3(block_count),
dim3(TBSIZE),
0, 0, d_a, d_c);
check_error();
hipDeviceSynchronize();
check_error();
}
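
The index arithmetic shared by all of the rewritten kernels is where the tuning lives: each lane owns elements_per_lane consecutive elements (the intent being that a wavefront can issue wide, e.g. dwordx4, loads), and when chunks_per_block > 1 the whole grid strides forward by dx = gridDim.x * blockDim.x * elements_per_lane between chunks, keeping accesses coalesced. A host-side sketch that replays the same arithmetic on toy sizes and checks every element is touched exactly once:

#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
  // Small toy sizes so the check runs instantly; the arithmetic matches the kernels.
  const std::size_t tbsize = 4, blocks = 3;
  const std::size_t elements_per_lane = 2, chunks_per_block = 2;
  const std::size_t n = tbsize * blocks * elements_per_lane * chunks_per_block;

  std::vector<int> visits(n, 0);
  const std::size_t dx = (tbsize * blocks) * elements_per_lane;   // grid-wide stride per chunk

  for (std::size_t block = 0; block != blocks; ++block)
    for (std::size_t thread = 0; thread != tbsize; ++thread)
    {
      const std::size_t gidx = (thread + block * tbsize) * elements_per_lane;
      for (std::size_t i = 0; i != chunks_per_block; ++i)
        for (std::size_t j = 0; j != elements_per_lane; ++j)
          ++visits[gidx + i * dx + j];
    }

  for (std::size_t k = 0; k != n; ++k)
    if (visits[k] != 1) { std::printf("index %zu visited %d times\n", k, visits[k]); return 1; }
  std::printf("all %zu indices visited exactly once\n", n);
}
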
template <typename T>
__global__ void mul_kernel(T * b, const T * c)
template <size_t elements_per_lane, size_t chunks_per_block, typename T>
__launch_bounds__(TBSIZE)
__global__
void mul_kernel(T * __restrict b, const T * __restrict c)
{
const T scalar = startScalar;
const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
b[i] = scalar * c[i];
const size_t dx = (blockDim.x * gridDim.x) * elements_per_lane;
const size_t gidx = (threadIdx.x + blockIdx.x * blockDim.x) * elements_per_lane;
for (size_t i = 0; i != chunks_per_block; ++i)
{
for (size_t j = 0; j != elements_per_lane; ++j)
{
store(scalar * load(c[gidx + i * dx + j]), b[gidx + i * dx + j]);
}
}
}
template <class T>
void HIPStream<T>::mul()
{
hipLaunchKernelGGL(HIP_KERNEL_NAME(mul_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_b, d_c);
hipLaunchKernelGGL(HIP_KERNEL_NAME(mul_kernel<elements_per_lane, chunks_per_block, T>),
dim3(block_count),
dim3(TBSIZE),
0, 0, d_b, d_c);
check_error();
hipDeviceSynchronize();
check_error();
}
template <typename T>
__global__ void add_kernel(const T * a, const T * b, T * c)
template <size_t elements_per_lane, size_t chunks_per_block, typename T>
__launch_bounds__(TBSIZE)
__global__
void add_kernel(const T * __restrict a, const T * __restrict b, T * __restrict c)
{
const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
c[i] = a[i] + b[i];
const size_t dx = (blockDim.x * gridDim.x) * elements_per_lane;
const size_t gidx = (threadIdx.x + blockIdx.x * blockDim.x) * elements_per_lane;
for (size_t i = 0; i != chunks_per_block; ++i)
{
for (size_t j = 0; j != elements_per_lane; ++j)
{
store(load(a[gidx + i * dx + j]) + load(b[gidx + i * dx + j]), c[gidx + i * dx + j]);
}
}
}
template <class T>
void HIPStream<T>::add()
{
hipLaunchKernelGGL(HIP_KERNEL_NAME(add_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_a, d_b, d_c);
hipLaunchKernelGGL(HIP_KERNEL_NAME(add_kernel<elements_per_lane, chunks_per_block, T>),
dim3(block_count),
dim3(TBSIZE),
0, 0, d_a, d_b, d_c);
check_error();
hipDeviceSynchronize();
check_error();
}
template <typename T>
__global__ void triad_kernel(T * a, const T * b, const T * c)
template <size_t elements_per_lane, size_t chunks_per_block, typename T>
__launch_bounds__(TBSIZE)
__global__
void triad_kernel(T * __restrict a, const T * __restrict b, const T * __restrict c)
{
const T scalar = startScalar;
const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
a[i] = b[i] + scalar * c[i];
const size_t dx = (blockDim.x * gridDim.x) * elements_per_lane;
const size_t gidx = (threadIdx.x + blockIdx.x * blockDim.x) * elements_per_lane;
for (size_t i = 0; i != chunks_per_block; ++i)
{
for (size_t j = 0; j != elements_per_lane; ++j)
{
store(load(b[gidx + i * dx + j]) + scalar * load(c[gidx + i * dx + j]), a[gidx + i * dx + j]);
}
}
}
template <class T>
void HIPStream<T>::triad()
{
hipLaunchKernelGGL(HIP_KERNEL_NAME(triad_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_a, d_b, d_c);
hipLaunchKernelGGL(HIP_KERNEL_NAME(triad_kernel<elements_per_lane, chunks_per_block, T>),
dim3(block_count),
dim3(TBSIZE),
0, 0, d_a, d_b, d_c);
check_error();
hipDeviceSynchronize();
check_error();
@@ -199,42 +283,78 @@ void HIPStream<T>::nstream()
check_error();
}
template <class T>
__global__ void dot_kernel(const T * a, const T * b, T * sum, int array_size)
template<unsigned int n = TBSIZE>
struct Reducer
{
template<typename I>
__device__
static
void reduce(I it) noexcept
{
if (n == 1) return;
#if defined(__HIP_PLATFORM_NVCC__)
constexpr unsigned int warpSize = 32;
#endif
constexpr bool is_same_warp{n <= warpSize * 2};
if (static_cast<int>(threadIdx.x) < n/2)
{
it[threadIdx.x] += it[threadIdx.x + n/2];
}
is_same_warp ? __threadfence_block() : __syncthreads();
Reducer<n/2>::reduce(it);
}
};
template<>
struct Reducer<1u> {
template<typename I>
__device__
static
void reduce(I) noexcept
{}
};
template <size_t elements_per_lane, size_t chunks_per_block, typename T>
__launch_bounds__(TBSIZE)
__global__
void dot_kernel(const T * __restrict a, const T * __restrict b, T * __restrict sum)
{
__shared__ T tb_sum[TBSIZE];
const size_t tidx = threadIdx.x;
const size_t dx = (blockDim.x * gridDim.x) * elements_per_lane;
const size_t gidx = (tidx + blockIdx.x * blockDim.x) * elements_per_lane;
int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
const size_t local_i = hipThreadIdx_x;
tb_sum[local_i] = 0.0;
for (; i < array_size; i += hipBlockDim_x*hipGridDim_x)
tb_sum[local_i] += a[i] * b[i];
for (int offset = hipBlockDim_x / 2; offset > 0; offset /= 2)
T tmp{0};
for (size_t i = 0; i != chunks_per_block; ++i)
{
__syncthreads();
if (local_i < offset)
for (size_t j = 0; j != elements_per_lane; ++j)
{
tb_sum[local_i] += tb_sum[local_i+offset];
tmp += load(a[gidx + i * dx + j]) * load(b[gidx + i * dx + j]);
}
}
tb_sum[tidx] = tmp;
__syncthreads();
if (local_i == 0)
sum[hipBlockIdx_x] = tb_sum[local_i];
Reducer<>::reduce(tb_sum);
if (tidx) return;
store(tb_sum[0], sum[blockIdx.x]);
}
template <class T>
T HIPStream<T>::dot()
{
hipLaunchKernelGGL(HIP_KERNEL_NAME(dot_kernel<T>), dim3(DOT_NUM_BLOCKS), dim3(TBSIZE), 0, 0, d_a, d_b, d_sum, array_size);
hipLaunchKernelGGL(HIP_KERNEL_NAME(dot_kernel<elements_per_lane, chunks_per_block, T>),
dim3(block_count),
dim3(TBSIZE),
0, 0, d_a, d_b, d_sum);
check_error();
hipMemcpy(sums, d_sum, DOT_NUM_BLOCKS*sizeof(T), hipMemcpyDeviceToHost);
hipMemcpy(sums, d_sum, block_count*sizeof(T), hipMemcpyDeviceToHost);
check_error();
T sum = 0.0;
for (int i = 0; i < DOT_NUM_BLOCKS; i++)
for (int i = 0; i < block_count; i++)
sum += sums[i];
return sum;
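
The Reducer template above replaces the old loop-plus-__syncthreads() shared-memory reduction with a compile-time-unrolled binary tree: each level halves the number of active lanes, and once the active slice fits within a single wavefront (n <= 2 * warpSize) the block-wide barrier is relaxed to __threadfence_block(). A host-side analogue of the same recursive halving, shown only to illustrate the tree shape (HostReducer is an illustrative name, not the device code):

#include <array>
#include <cstdio>

// Recursive template mirroring the tree reduction: at each level the
// first n/2 slots accumulate the second n/2 slots, then recurse on n/2.
template <unsigned int n>
struct HostReducer
{
  template <typename It>
  static void reduce(It it)
  {
    for (unsigned int t = 0; t < n / 2; ++t)
      it[t] += it[t + n / 2];
    HostReducer<n / 2>::reduce(it);
  }
};

template <>
struct HostReducer<1u>
{
  template <typename It>
  static void reduce(It) {}
};

int main()
{
  std::array<double, 1024> partial;
  for (unsigned int i = 0; i != partial.size(); ++i)
    partial[i] = i + 1;                       // 1 + 2 + ... + 1024 = 524800

  HostReducer<1024>::reduce(partial.data());
  std::printf("tree-reduced sum: %.0f (expected 524800)\n", partial[0]);
}
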


@@ -18,9 +18,42 @@
template <class T>
class HIPStream : public Stream<T>
{
#ifdef __HIP_PLATFORM_NVCC__
#ifndef DWORDS_PER_LANE
#define DWORDS_PER_LANE 1
#endif
#ifndef CHUNKS_PER_BLOCK
#define CHUNKS_PER_BLOCK 8
#endif
#else
#ifndef DWORDS_PER_LANE
#define DWORDS_PER_LANE 4
#endif
#ifndef CHUNKS_PER_BLOCK
#define CHUNKS_PER_BLOCK 1
#endif
#endif
// Make sure that either:
// DWORDS_PER_LANE is less than sizeof(T), in which case we default to 1 element
// or
// DWORDS_PER_LANE is divisible by sizeof(T)
static_assert((DWORDS_PER_LANE * sizeof(unsigned int) < sizeof(T)) ||
(DWORDS_PER_LANE * sizeof(unsigned int) % sizeof(T) == 0),
"DWORDS_PER_LANE not divisible by sizeof(element_type)");
static constexpr unsigned int chunks_per_block{CHUNKS_PER_BLOCK};
static constexpr unsigned int dwords_per_lane{DWORDS_PER_LANE};
// Take into account the datatype size
// That is, if we specify 4 DWORDS_PER_LANE, this is 2 FP64 elements
// and 4 FP32 elements
static constexpr unsigned int elements_per_lane{
(DWORDS_PER_LANE * sizeof(unsigned int)) < sizeof(T) ? 1 : (
DWORDS_PER_LANE * sizeof(unsigned int) / sizeof(T))};
protected:
// Size of arrays
int array_size;
int block_count;
// Host array for partial sums for dot kernel
T *sums;
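
Note that DWORDS_PER_LANE counts 32-bit words (sizeof(unsigned int)), not elements: the AMD default of 4 dwords gives each lane a 16-byte budget, i.e. four floats or two doubles, and a type wider than the whole budget falls back to one element per lane, which is what the static_assert and the elements_per_lane expression above encode. A few standalone compile-time checks of that mapping:

#include <cstddef>

// Same expression as in the header, pulled out for a quick compile-time check.
template <typename T, std::size_t dwords_per_lane>
constexpr std::size_t elements_per_lane()
{
  return (dwords_per_lane * sizeof(unsigned int) < sizeof(T))
             ? 1
             : dwords_per_lane * sizeof(unsigned int) / sizeof(T);
}

// 4 dwords = 16 bytes per lane:
static_assert(elements_per_lane<float, 4>() == 4, "16 bytes hold four floats");
static_assert(elements_per_lane<double, 4>() == 2, "16 bytes hold two doubles");
// 1 dword = 4 bytes per lane: too narrow for a double, so fall back to one element.
static_assert(elements_per_lane<double, 1>() == 1, "narrow lanes fall back to one element");

int main() {}
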


@@ -2,6 +2,19 @@
register_flag_required(CMAKE_CXX_COMPILER
"Absolute path to the AMD HIP C++ compiler")
register_flag_optional(USE_NONTEMPORAL_MEM
"Flag indicating to use non-temporal memory accesses to bypass cache."
"OFF")
# TODO: Better flag descriptions
register_flag_optional(DWORDS_PER_LANE "Flag indicating the number of double data types per wavefront lane." 4)
register_flag_optional(CHUNKS_PER_BLOCK "Flag indicating the chunks per block." 1)
macro(setup)
# nothing to do here as hipcc does everything correctly, what a surprise!
# Ensure we set the proper preprocessor directives
if (USE_NONTEMPORAL_MEM)
add_definitions(-DNONTEMPORAL)
endif ()
register_definitions(DWORDS_PER_LANE=${DWORDS_PER_LANE})
register_definitions(CHUNKS_PER_BLOCK=${CHUNKS_PER_BLOCK})
endmacro()
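
Assuming BabelStream's usual CMake driver, where MODEL selects the backend and flags registered this way become -D cache entries, a configure line exercising the new options might look like the following (paths and values are illustrative, not taken from the commit):

cmake -Bbuild -H. -DMODEL=hip -DCMAKE_CXX_COMPILER=/opt/rocm/bin/hipcc -DUSE_NONTEMPORAL_MEM=ON -DDWORDS_PER_LANE=4 -DCHUNKS_PER_BLOCK=1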