Use static shared memory in dot for CUDA and HIP
parent e7a619c63c
commit 94e0900377
CUDAStream.cu
@@ -182,9 +182,7 @@ void CUDAStream<T>::triad()
 template <class T>
 __global__ void dot_kernel(const T * a, const T * b, T * sum, unsigned int array_size)
 {
-  extern __shared__ __align__(sizeof(T)) unsigned char smem[];
-  T *tb_sum = reinterpret_cast<T*>(smem);
+  __shared__ T tb_sum[TBSIZE];
 
   int i = blockDim.x * blockIdx.x + threadIdx.x;
   const size_t local_i = threadIdx.x;
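For context, the two idioms differ in where the buffer size comes from: the old code sized tb_sum at launch time via the third execution-configuration parameter, while the new code lets the compiler reserve TBSIZE elements statically. A minimal standalone sketch of the contrast follows; the kernel and variable names are illustrative, not from this repository, and TBSIZE stands in for the repo's block-size constant.

// Sketch only: contrasts the two shared-memory styles touched by this commit.
#define TBSIZE 1024

template <class T>
__global__ void scale_dynamic(const T *a, T *c)
{
  // Dynamic shared memory: the extern declaration is untyped and unsized;
  // the byte count comes from the third <<<>>> launch parameter. Templated
  // kernels funnel through one unsigned char array and cast, because a typed
  // extern __shared__ array would be redeclared by each instantiation.
  extern __shared__ __align__(sizeof(T)) unsigned char smem[];
  T *buf = reinterpret_cast<T*>(smem);

  const int i = blockDim.x * blockIdx.x + threadIdx.x;
  buf[threadIdx.x] = a[i];
  __syncthreads();
  c[i] = buf[threadIdx.x] * static_cast<T>(2);
}

template <class T>
__global__ void scale_static(const T *a, T *c)
{
  // Static shared memory: sized by the compile-time constant TBSIZE, typed
  // directly as T, reserved by the compiler; no launch parameter, no cast.
  __shared__ T buf[TBSIZE];

  const int i = blockDim.x * blockIdx.x + threadIdx.x;
  buf[threadIdx.x] = a[i];
  __syncthreads();
  c[i] = buf[threadIdx.x] * static_cast<T>(2);
}

// Launches (block size must be TBSIZE for the static version to line up):
//   scale_dynamic<<<blocks, TBSIZE, sizeof(T) * TBSIZE>>>(d_a, d_c);
//   scale_static <<<blocks, TBSIZE>>>(d_a, d_c);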
@@ -209,7 +207,7 @@ __global__ void dot_kernel(const T * a, const T * b, T * sum, unsigned int array_size)
 template <class T>
 T CUDAStream<T>::dot()
 {
-  dot_kernel<<<DOT_NUM_BLOCKS, TBSIZE, sizeof(T)*TBSIZE>>>(d_a, d_b, d_sum, array_size);
+  dot_kernel<<<DOT_NUM_BLOCKS, TBSIZE>>>(d_a, d_b, d_sum, array_size);
   check_error();
 
   cudaMemcpy(sums, d_sum, DOT_NUM_BLOCKS*sizeof(T), cudaMemcpyDeviceToHost);
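At the call site, dropping the dynamic allocation means the third <<<>>> parameter, the per-block shared-memory byte count, goes away. For orientation, the reduction that the diff context elides typically looks like the sketch below; this is a reconstruction from the declarations visible above, not the verbatim file, and it assumes TBSIZE is a power of two.

// Sketch of a grid-stride dot kernel with a block-level tree reduction
// over the static tb_sum buffer; an approximation, not a verbatim copy.
template <class T>
__global__ void dot_kernel(const T * a, const T * b, T * sum, unsigned int array_size)
{
  __shared__ T tb_sum[TBSIZE];

  int i = blockDim.x * blockIdx.x + threadIdx.x;
  const size_t local_i = threadIdx.x;

  // Grid-stride loop: each thread accumulates a partial sum of products.
  tb_sum[local_i] = 0.0;
  for (; i < array_size; i += blockDim.x * gridDim.x)
    tb_sum[local_i] += a[i] * b[i];

  // Tree reduction within the block, halving the active threads each step
  // (assumes blockDim.x is a power of two).
  for (int offset = blockDim.x / 2; offset > 0; offset /= 2)
  {
    __syncthreads();
    if (local_i < offset)
      tb_sum[local_i] += tb_sum[local_i + offset];
  }

  // Thread 0 writes this block's partial sum for the host to finish.
  if (local_i == 0)
    sum[blockIdx.x] = tb_sum[local_i];
}

Each block leaves one partial sum in sum[blockIdx.x]; the cudaMemcpy above then brings the DOT_NUM_BLOCKS partials back for a final reduction on the host.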
HIPStream.cpp
@@ -185,8 +185,7 @@ void HIPStream<T>::triad()
 template <class T>
 __global__ void dot_kernel(hipLaunchParm lp, const T * a, const T * b, T * sum, unsigned int array_size)
 {
-  HIP_DYNAMIC_SHARED(T,tb_sum);
+  __shared__ T tb_sum[TBSIZE];
 
   int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
   const size_t local_i = hipThreadIdx_x;
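HIP_DYNAMIC_SHARED is HIP's portability macro for declaring a dynamically sized shared buffer; it wraps the extern __shared__ idiom, which differed between the hcc and nvcc back ends of this HIP generation. Once the size is the compile-time constant TBSIZE, a plain __shared__ declaration is portable as written. A small self-contained sketch under that assumption, using the same hipLaunchParm-era API as this diff and illustrative names:

#include <hip/hip_runtime.h>

#define TBSIZE 1024

// Sketch only: reverses each block-sized chunk of the input through a
// statically allocated shared buffer.
template <class T>
__global__ void reverse_block(hipLaunchParm lp, const T *in, T *out)
{
  // Static __shared__ works identically under HIP and CUDA; no
  // HIP_DYNAMIC_SHARED macro and no launch-time byte count needed.
  __shared__ T buf[TBSIZE];

  const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
  buf[hipThreadIdx_x] = in[i];
  __syncthreads();
  out[i] = buf[hipBlockDim_x - 1 - hipThreadIdx_x];
}

// Launch: the shared-bytes argument is 0 now that nothing is dynamic:
//   hipLaunchKernel(HIP_KERNEL_NAME(reverse_block<float>),
//                   dim3(blocks), dim3(TBSIZE), 0, 0, d_in, d_out);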
@@ -211,7 +210,7 @@ __global__ void dot_kernel(hipLaunchParm lp, const T * a, const T * b, T * sum, unsigned int array_size)
 template <class T>
 T HIPStream<T>::dot()
 {
-  hipLaunchKernel(HIP_KERNEL_NAME(dot_kernel), dim3(DOT_NUM_BLOCKS), dim3(TBSIZE), sizeof(T)*TBSIZE, 0, d_a, d_b, d_sum, array_size);
+  hipLaunchKernel(HIP_KERNEL_NAME(dot_kernel), dim3(DOT_NUM_BLOCKS), dim3(TBSIZE), 0, 0, d_a, d_b, d_sum, array_size);
   check_error();
 
   hipMemcpy(sums, d_sum, DOT_NUM_BLOCKS*sizeof(T), hipMemcpyDeviceToHost);
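In hipLaunchKernel's argument list the fourth parameter is the dynamic shared-memory byte count, so it drops from sizeof(T)*TBSIZE to 0, mirroring the removed third <<<>>> parameter on the CUDA side. A sketch of the full host-side dot() after the change, assuming the member names visible in the diff (d_a, d_b, d_sum, sums) and a simple host-side final reduction; not verbatim from the repository:

template <class T>
T HIPStream<T>::dot()
{
  // 4th argument: dynamic shared bytes per block. Now 0, since tb_sum is
  // statically allocated inside the kernel.
  hipLaunchKernel(HIP_KERNEL_NAME(dot_kernel), dim3(DOT_NUM_BLOCKS),
                  dim3(TBSIZE), 0, 0, d_a, d_b, d_sum, array_size);
  check_error();

  // Each block wrote one partial sum; copy them back and finish on the host.
  hipMemcpy(sums, d_sum, DOT_NUM_BLOCKS*sizeof(T), hipMemcpyDeviceToHost);

  T sum = 0.0;
  for (int i = 0; i < DOT_NUM_BLOCKS; i++)
    sum += sums[i];

  return sum;
}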