From a075455ad45f00be73b10cc54a8369397221c691 Mon Sep 17 00:00:00 2001
From: Thomas Gibson <14180421+thomasgibson@users.noreply.github.com>
Date: Sat, 30 Apr 2022 21:59:45 -0500
Subject: [PATCH] Add tuned benchmark kernels

Co-authored-by: Nick Curtis
---
 src/hip/HIPStream.cpp | 210 ++++++++++++++++++++++++++++++++++----------
 src/hip/HIPStream.h   |  33 ++++++++
 src/hip/model.cmake   |  15 ++-
 3 files changed, 212 insertions(+), 46 deletions(-)

diff --git a/src/hip/HIPStream.cpp b/src/hip/HIPStream.cpp
index 6aed1ee..dcf634e 100644
--- a/src/hip/HIPStream.cpp
+++ b/src/hip/HIPStream.cpp
@@ -9,7 +9,32 @@
 #include "hip/hip_runtime.h"

 #define TBSIZE 1024
-#define DOT_NUM_BLOCKS 256
+
+#ifdef NONTEMPORAL
+template<typename T>
+__device__ __forceinline__ T load(const T& ref)
+{
+  return __builtin_nontemporal_load(&ref);
+}
+
+template<typename T>
+__device__ __forceinline__ void store(const T& value, T& ref)
+{
+  __builtin_nontemporal_store(value, &ref);
+}
+#else
+template<typename T>
+__device__ __forceinline__ T load(const T& ref)
+{
+  return ref;
+}
+
+template<typename T>
+__device__ __forceinline__ void store(const T& value, T& ref)
+{
+  ref = value;
+}
+#endif

 void check_error(void)
 {
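Note: __builtin_nontemporal_load and __builtin_nontemporal_store are Clang builtins, so the NONTEMPORAL path assumes a Clang-based HIP compiler (hipcc/amdclang++). On AMD hardware they emit loads and stores hinted to bypass the cache hierarchy, which can help pure streaming kernels that never reuse the data they touch. A minimal sketch of how the two helpers compose (the kernel name and indexing below are illustrative only, not part of the patch):

    template<typename T>
    __global__ void stream_copy(const T* __restrict in, T* __restrict out)
    {
      const size_t i = threadIdx.x + blockIdx.x * static_cast<size_t>(blockDim.x);
      // Lowers to the non-temporal builtins when NONTEMPORAL is defined,
      // and to an ordinary read and write otherwise.
      store(load(in[i]), out[i]);
    }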
@@ -23,15 +48,27 @@ void check_error(void)

 template <class T>
 HIPStream<T>::HIPStream(const int ARRAY_SIZE, const int device_index)
+  : array_size{ARRAY_SIZE},
+    block_count(array_size / (TBSIZE * elements_per_lane * chunks_per_block))
 {
-  // The array size must be divisible by TBSIZE for kernel launches
-  if (ARRAY_SIZE % TBSIZE != 0)
+  std::cerr << "Elements per lane: " << elements_per_lane << std::endl;
+  std::cerr << "Chunks per block: " << chunks_per_block << std::endl;
+  // The array size must be divisible by the total number of elements
+  // moved per block for kernel launches
+  if (ARRAY_SIZE % (TBSIZE * elements_per_lane * chunks_per_block) != 0)
   {
     std::stringstream ss;
-    ss << "Array size must be a multiple of " << TBSIZE;
+    ss << "Array size must be a multiple of elements operated on per block ("
+       << TBSIZE * elements_per_lane * chunks_per_block
+       << ").";
     throw std::runtime_error(ss.str());
   }
+  std::cerr << "Block count: " << block_count << std::endl;
+
+#ifdef NONTEMPORAL
+  std::cerr << "Using non-temporal memory operations." << std::endl;
+#endif

   // Set device
   int count;
@@ -49,7 +86,7 @@ HIPStream<T>::HIPStream(const int ARRAY_SIZE, const int device_index)
   array_size = ARRAY_SIZE;

   // Allocate the host array for partial sums for dot kernels
-  sums = (T*)malloc(sizeof(T) * DOT_NUM_BLOCKS);
+  sums = (T*)malloc(block_count*sizeof(T));

   // Check buffers fit on the device
   hipDeviceProp_t props;
@@ -64,7 +101,7 @@ HIPStream<T>::HIPStream(const int ARRAY_SIZE, const int device_index)
   check_error();
   hipMalloc(&d_c, ARRAY_SIZE*sizeof(T));
   check_error();
-  hipMalloc(&d_sum, DOT_NUM_BLOCKS*sizeof(T));
+  hipMalloc(&d_sum, block_count*sizeof(T));
   check_error();
 }

@@ -115,68 +152,115 @@ void HIPStream<T>::read_arrays(std::vector<T>& a, std::vector<T>& b, std::vector<T>& c)
   check_error();
 }

-
-template <typename T>
-__global__ void copy_kernel(const T * a, T * c)
+template <unsigned int elements_per_lane, unsigned int chunks_per_block, typename T>
+__launch_bounds__(TBSIZE)
+__global__
+void copy_kernel(const T * __restrict a, T * __restrict c)
 {
-  const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
-  c[i] = a[i];
+  const size_t dx = (blockDim.x * gridDim.x) * elements_per_lane;
+  const size_t gidx = (threadIdx.x + blockIdx.x * blockDim.x) * elements_per_lane;
+  for (size_t i = 0; i != chunks_per_block; ++i)
+  {
+    for (size_t j = 0; j != elements_per_lane; ++j)
+    {
+      store(load(a[gidx + i * dx + j]), c[gidx + i * dx + j]);
+    }
+  }
 }

 template <class T>
 void HIPStream<T>::copy()
 {
-  hipLaunchKernelGGL(HIP_KERNEL_NAME(copy_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_a, d_c);
+  hipLaunchKernelGGL(HIP_KERNEL_NAME(copy_kernel<elements_per_lane, chunks_per_block, T>),
+                     dim3(block_count),
+                     dim3(TBSIZE),
+                     0, 0, d_a, d_c);
   check_error();
   hipDeviceSynchronize();
   check_error();
 }

-template <typename T>
-__global__ void mul_kernel(T * b, const T * c)
+template <unsigned int elements_per_lane, unsigned int chunks_per_block, typename T>
+__launch_bounds__(TBSIZE)
+__global__
+void mul_kernel(T * __restrict b, const T * __restrict c)
 {
   const T scalar = startScalar;
-  const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
-  b[i] = scalar * c[i];
+  const size_t dx = (blockDim.x * gridDim.x) * elements_per_lane;
+  const size_t gidx = (threadIdx.x + blockIdx.x * blockDim.x) * elements_per_lane;
+  for (size_t i = 0; i != chunks_per_block; ++i)
+  {
+    for (size_t j = 0; j != elements_per_lane; ++j)
+    {
+      store(scalar * load(c[gidx + i * dx + j]), b[gidx + i * dx + j]);
+    }
+  }
 }

 template <class T>
 void HIPStream<T>::mul()
 {
-  hipLaunchKernelGGL(HIP_KERNEL_NAME(mul_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_b, d_c);
+  hipLaunchKernelGGL(HIP_KERNEL_NAME(mul_kernel<elements_per_lane, chunks_per_block, T>),
+                     dim3(block_count),
+                     dim3(TBSIZE),
+                     0, 0, d_b, d_c);
   check_error();
   hipDeviceSynchronize();
   check_error();
 }

-template <typename T>
-__global__ void add_kernel(const T * a, const T * b, T * c)
+template <unsigned int elements_per_lane, unsigned int chunks_per_block, typename T>
+__launch_bounds__(TBSIZE)
+__global__
+void add_kernel(const T * __restrict a, const T * __restrict b, T * __restrict c)
 {
-  const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
-  c[i] = a[i] + b[i];
+  const size_t dx = (blockDim.x * gridDim.x) * elements_per_lane;
+  const size_t gidx = (threadIdx.x + blockIdx.x * blockDim.x) * elements_per_lane;
+  for (size_t i = 0; i != chunks_per_block; ++i)
+  {
+    for (size_t j = 0; j != elements_per_lane; ++j)
+    {
+      store(load(a[gidx + i * dx + j]) + load(b[gidx + i * dx + j]), c[gidx + i * dx + j]);
+    }
+  }
 }

 template <class T>
 void HIPStream<T>::add()
 {
-  hipLaunchKernelGGL(HIP_KERNEL_NAME(add_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_a, d_b, d_c);
+  hipLaunchKernelGGL(HIP_KERNEL_NAME(add_kernel<elements_per_lane, chunks_per_block, T>),
+                     dim3(block_count),
+                     dim3(TBSIZE),
+                     0, 0, d_a, d_b, d_c);
   check_error();
   hipDeviceSynchronize();
   check_error();
 }

-template <typename T>
-__global__ void triad_kernel(T * a, const T * b, const T * c)
+template <unsigned int elements_per_lane, unsigned int chunks_per_block, typename T>
+__launch_bounds__(TBSIZE)
+__global__
+void triad_kernel(T * __restrict a, const T * __restrict b, const T * __restrict c)
 {
   const T scalar = startScalar;
-  const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
-  a[i] = b[i] + scalar * c[i];
+  const size_t dx = (blockDim.x * gridDim.x) * elements_per_lane;
+  const size_t gidx = (threadIdx.x + blockIdx.x * blockDim.x) * elements_per_lane;
+  for (size_t i = 0; i != chunks_per_block; ++i)
+  {
+    for (size_t j = 0; j != elements_per_lane; ++j)
+    {
+      store(load(b[gidx + i * dx + j]) + scalar * load(c[gidx + i * dx + j]), a[gidx + i * dx + j]);
+    }
+  }
 }

 template <class T>
 void HIPStream<T>::triad()
 {
-  hipLaunchKernelGGL(HIP_KERNEL_NAME(triad_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_a, d_b, d_c);
+  hipLaunchKernelGGL(HIP_KERNEL_NAME(triad_kernel<elements_per_lane, chunks_per_block, T>),
+                     dim3(block_count),
+                     dim3(TBSIZE),
+                     0, 0, d_a, d_b, d_c);
   check_error();
   hipDeviceSynchronize();
   check_error();
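Each of copy/mul/add/triad now uses the same addressing scheme: a lane owns elements_per_lane consecutive elements starting at gidx, and the grid sweeps the array in chunks_per_block strides of dx = gridDim.x * blockDim.x * elements_per_lane elements, so block_count blocks cover the array exactly once. A standalone host-side check of that arithmetic (the concrete values — FP64, the AMD defaults DWORDS_PER_LANE=4 and CHUNKS_PER_BLOCK=1, and BabelStream's default array size — are illustrative assumptions):

    #include <cstdio>

    int main()
    {
      constexpr unsigned TBSIZE = 1024;
      constexpr unsigned dwords_per_lane = 4;                  // 4 x 32-bit words = 16 bytes
      constexpr unsigned elements_per_lane =
          dwords_per_lane * sizeof(unsigned) / sizeof(double); // = 2 FP64 elements
      constexpr unsigned chunks_per_block = 1;
      constexpr unsigned array_size = 33554432;                // 2^25, the BabelStream default

      static_assert(array_size % (TBSIZE * elements_per_lane * chunks_per_block) == 0,
                    "array not evenly covered by the grid");
      constexpr unsigned block_count =
          array_size / (TBSIZE * elements_per_lane * chunks_per_block);
      // Each block moves TBSIZE * elements_per_lane elements per chunk.
      printf("block_count = %u\n", block_count);               // prints 16384
    }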
@@ -199,42 +283,78 @@ void HIPStream<T>::nstream()
   check_error();
 }

-template <class T>
-__global__ void dot_kernel(const T * a, const T * b, T * sum, int array_size)
+template<unsigned int n = TBSIZE>
+struct Reducer
+{
+  template<typename I>
+  __device__
+  static
+  void reduce(I it) noexcept
+  {
+    if (n == 1) return;
+
+#if defined(__HIP_PLATFORM_NVCC__)
+    constexpr unsigned int warpSize = 32;
+#endif
+    constexpr bool is_same_warp{n <= warpSize * 2};
+    if (static_cast<int>(threadIdx.x) < n/2)
+    {
+      it[threadIdx.x] += it[threadIdx.x + n/2];
+    }
+    is_same_warp ? __threadfence_block() : __syncthreads();
+
+    Reducer<n/2>::reduce(it);
+  }
+};
+
+template<>
+struct Reducer<1u> {
+  template<typename I>
+  __device__
+  static
+  void reduce(I) noexcept
+  {}
+};
+
+template <unsigned int elements_per_lane, unsigned int chunks_per_block, typename T>
+__launch_bounds__(TBSIZE)
+__global__
+void dot_kernel(const T * __restrict a, const T * __restrict b, T * __restrict sum)
 {
   __shared__ T tb_sum[TBSIZE];
+  const size_t tidx = threadIdx.x;
+  const size_t dx = (blockDim.x * gridDim.x) * elements_per_lane;
+  const size_t gidx = (tidx + blockIdx.x * blockDim.x) * elements_per_lane;

-  int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
-  const size_t local_i = hipThreadIdx_x;
-
-  tb_sum[local_i] = 0.0;
-  for (; i < array_size; i += hipBlockDim_x*hipGridDim_x)
-    tb_sum[local_i] += a[i] * b[i];
-
-  for (int offset = hipBlockDim_x / 2; offset > 0; offset /= 2)
+  T tmp{0};
+  for (size_t i = 0; i != chunks_per_block; ++i)
   {
-    __syncthreads();
-    if (local_i < offset)
+    for (size_t j = 0; j != elements_per_lane; ++j)
     {
-      tb_sum[local_i] += tb_sum[local_i+offset];
+      tmp += load(a[gidx + i * dx + j]) * load(b[gidx + i * dx + j]);
     }
   }
+  tb_sum[tidx] = tmp;
+  __syncthreads();

-  if (local_i == 0)
-    sum[hipBlockIdx_x] = tb_sum[local_i];
+  Reducer<>::reduce(tb_sum);
+  if (tidx) return;
+  store(tb_sum[0], sum[blockIdx.x]);
 }

 template <class T>
 T HIPStream<T>::dot()
 {
-  hipLaunchKernelGGL(HIP_KERNEL_NAME(dot_kernel<T>), dim3(DOT_NUM_BLOCKS), dim3(TBSIZE), 0, 0, d_a, d_b, d_sum, array_size);
+  hipLaunchKernelGGL(HIP_KERNEL_NAME(dot_kernel<elements_per_lane, chunks_per_block, T>),
+                     dim3(block_count),
+                     dim3(TBSIZE),
+                     0, 0, d_a, d_b, d_sum);
   check_error();

-  hipMemcpy(sums, d_sum, DOT_NUM_BLOCKS*sizeof(T), hipMemcpyDeviceToHost);
+  hipMemcpy(sums, d_sum, block_count*sizeof(T), hipMemcpyDeviceToHost);
   check_error();

   T sum = 0.0;
-  for (int i = 0; i < DOT_NUM_BLOCKS; i++)
+  for (int i = 0; i < block_count; i++)
     sum += sums[i];

   return sum;
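Reducer<n> replaces the old runtime reduction loop with a compile-time-unrolled shared-memory tree: each instantiation folds the upper n/2 partial sums onto the lower half, then recurses as Reducer<n/2> until the Reducer<1u> specialization terminates the chain. Once n <= warpSize * 2 the remaining active lanes fit in a single wavefront and run in lockstep, so the block-wide __syncthreads() is swapped for the cheaper __threadfence_block(); the #if exists because CUDA's warpSize is not a compile-time constant, so the NVCC path pins a constexpr value of 32. A host-side analogue of the recursion (a plain C++17 sketch, not the patch's device code):

    #include <cstdio>

    // One recursion level folds the top half onto the bottom half; on the
    // GPU the inner loop runs as one thread per index t.
    template<unsigned n>
    void reduce(double* it)
    {
      if constexpr (n > 1)
      {
        for (unsigned t = 0; t < n / 2; ++t)
          it[t] += it[t + n / 2];
        reduce<n / 2>(it);
      }
    }

    int main()
    {
      double v[8] = {1, 2, 3, 4, 5, 6, 7, 8};
      reduce<8>(v);
      printf("%g\n", v[0]); // 36
    }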
diff --git a/src/hip/HIPStream.h b/src/hip/HIPStream.h
index 44a2893..ecdf929 100644
--- a/src/hip/HIPStream.h
+++ b/src/hip/HIPStream.h
@@ -18,9 +18,42 @@
 template <class T>
 class HIPStream : public Stream<T>
 {
+#ifdef __HIP_PLATFORM_NVCC__
+  #ifndef DWORDS_PER_LANE
+  #define DWORDS_PER_LANE 1
+  #endif
+  #ifndef CHUNKS_PER_BLOCK
+  #define CHUNKS_PER_BLOCK 8
+  #endif
+#else
+  #ifndef DWORDS_PER_LANE
+  #define DWORDS_PER_LANE 4
+  #endif
+  #ifndef CHUNKS_PER_BLOCK
+  #define CHUNKS_PER_BLOCK 1
+  #endif
+#endif
+  // Make sure that either:
+  //   the lane width in bytes (DWORDS_PER_LANE * sizeof(unsigned int)) is less
+  //   than sizeof(T), in which case we default to 1 element per lane, or
+  //   the lane width in bytes is divisible by sizeof(T)
+  static_assert((DWORDS_PER_LANE * sizeof(unsigned int) < sizeof(T)) ||
+                (DWORDS_PER_LANE * sizeof(unsigned int) % sizeof(T) == 0),
+                "DWORDS_PER_LANE not divisible by sizeof(element_type)");
+
+  static constexpr unsigned int chunks_per_block{CHUNKS_PER_BLOCK};
+  static constexpr unsigned int dwords_per_lane{DWORDS_PER_LANE};
+  // Take into account the datatype size;
+  // that is, if we specify 4 DWORDS_PER_LANE, this is 2 FP64 elements
+  // and 4 FP32 elements
+  static constexpr unsigned int elements_per_lane{
+    (DWORDS_PER_LANE * sizeof(unsigned int)) < sizeof(T) ? 1 : (
+     DWORDS_PER_LANE * sizeof(unsigned int) / sizeof(T))};
+
   protected:
     // Size of arrays
     int array_size;
+    int block_count;

     // Host array for partial sums for dot kernel
     T *sums;

diff --git a/src/hip/model.cmake b/src/hip/model.cmake
index 78150c4..3ffaf7a 100644
--- a/src/hip/model.cmake
+++ b/src/hip/model.cmake
@@ -2,6 +2,19 @@
 register_flag_required(CMAKE_CXX_COMPILER
         "Absolute path to the AMD HIP C++ compiler")

+register_flag_optional(USE_NONTEMPORAL_MEM
+        "Whether to use non-temporal memory accesses that bypass the cache."
+        "OFF")
+
+# TODO: Better flag descriptions
+register_flag_optional(DWORDS_PER_LANE "Number of 32-bit dwords moved per wavefront lane." 4)
+register_flag_optional(CHUNKS_PER_BLOCK "Number of grid-sized chunks each thread block processes." 1)
+
 macro(setup)
-  # nothing to do here as hipcc does everything correctly, what a surprise!
+  # Ensure we set the proper preprocessor directives
+  if (USE_NONTEMPORAL_MEM)
+    add_definitions(-DNONTEMPORAL)
+  endif ()
+  register_definitions(DWORDS_PER_LANE=${DWORDS_PER_LANE})
+  register_definitions(CHUNKS_PER_BLOCK=${CHUNKS_PER_BLOCK})
 endmacro()
\ No newline at end of file
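On the HIPStream.h arithmetic above: DWORDS_PER_LANE counts 32-bit words moved per lane, and elements_per_lane converts that width into whole elements of T, clamping to one element when a single T is already wider than the lane. A standalone check of the expression (illustrative; it mirrors the static member rather than being part of the patch):

    #include <cstdint>

    template<typename T, unsigned dwords_per_lane>
    constexpr unsigned elements_per_lane =
        dwords_per_lane * sizeof(uint32_t) < sizeof(T)
            ? 1u
            : dwords_per_lane * sizeof(uint32_t) / sizeof(T);

    static_assert(elements_per_lane<float, 4>  == 4, "16 bytes -> 4 FP32");
    static_assert(elements_per_lane<double, 4> == 2, "16 bytes -> 2 FP64");
    static_assert(elements_per_lane<double, 1> == 1, "4 bytes < 8 -> clamp to 1");

    int main() {}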
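At configure time the new options are plumbed through as preprocessor definitions. Assuming BabelStream's top-level -DMODEL switch and a ROCm installation under /opt/rocm (both assumptions; adjust to the local setup), a configure line exercising the tuned path might look like:

    cmake -Bbuild -H. -DMODEL=hip \
          -DCMAKE_CXX_COMPILER=/opt/rocm/bin/hipcc \
          -DUSE_NONTEMPORAL_MEM=ON \
          -DDWORDS_PER_LANE=4 \
          -DCHUNKS_PER_BLOCK=1
    cmake --build build

Leaving DWORDS_PER_LANE and CHUNKS_PER_BLOCK unset picks up the per-platform defaults from HIPStream.h (4/1 on AMD, 1/8 under NVCC).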