Merge pull request #26 from psteinb/fix_sharedmem_hip

Fix sharedmem hip
James Price, 2017-02-28 12:36:37 +00:00 (committed by GitHub)
commit 8a47b72764
2 changed files with 12 additions and 15 deletions


@@ -3,13 +3,7 @@
 HIPCC = hipcc
 
-ifndef CUDA_PATH
-ifeq (,$(wildcard /usr/local/bin/nvcc))
-$(error /usr/local/bin/nvcc not found, set CUDA_PATH instead)
-endif
-endif
-
-hip-stream: main.cpp HIPStream.cu
+hip-stream: main.cpp HIPStream.cpp
 	$(HIPCC) $(CXXFLAGS) -std=c++11 -DHIP $^ $(EXTRA_FLAGS) -o $@
 
 .PHONY: clean


@@ -72,14 +72,19 @@ HIPStream<T>::HIPStream(const unsigned int ARRAY_SIZE, const int device_index)
 template <class T>
 HIPStream<T>::~HIPStream()
 {
+  free(sums);
+
   hipFree(d_a);
   check_error();
   hipFree(d_b);
   check_error();
   hipFree(d_c);
   check_error();
+  hipFree(d_sum);
+  check_error();
 }
 
 template <typename T>
 __global__ void init_kernel(hipLaunchParm lp, T * a, T * b, T * c, T initA, T initB, T initC)
 {
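Note: the new free(sums) and hipFree(d_sum) calls in the destructor assume matching host and device allocations for the dot kernel's per-block partial sums elsewhere in HIPStream, outside this diff. A minimal sketch of that assumed pairing follows; the helper name allocate_dot_buffers and the n_blocks parameter are hypothetical, and error checking is omitted.

// Sketch only (not part of this diff): the approximate shape of the
// allocations that the destructor's new free(sums) / hipFree(d_sum) release.
#include <cstdlib>
#include "hip/hip_runtime.h"

template <class T>
void allocate_dot_buffers(T *&sums, T *&d_sum, unsigned int n_blocks)
{
  // Host buffer for per-block partial sums, later released with free().
  sums = static_cast<T *>(std::malloc(sizeof(T) * n_blocks));

  // Device buffer written by dot_kernel, later released with hipFree().
  hipMalloc(reinterpret_cast<void **>(&d_sum), sizeof(T) * n_blocks);
}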
@@ -177,22 +182,20 @@ void HIPStream<T>::triad()
   check_error();
 }
 
 template <class T>
 __global__ void dot_kernel(hipLaunchParm lp, const T * a, const T * b, T * sum, unsigned int array_size)
 {
-  extern __shared__ __align__(sizeof(T)) unsigned char smem[];
-  T *tb_sum = reinterpret_cast<T*>(smem);
-
+  HIP_DYNAMIC_SHARED(T,tb_sum);
-  int i = blockDim.x * blockIdx.x + threadIdx.x;
-  const size_t local_i = threadIdx.x;
+  int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
+  const size_t local_i = hipThreadIdx_x;
   tb_sum[local_i] = 0.0;
-  for (; i < array_size; i += blockDim.x*gridDim.x)
+  for (; i < array_size; i += hipBlockDim_x*hipGridDim_x)
     tb_sum[local_i] += a[i] * b[i];
 
-  for (int offset = blockDim.x / 2; offset > 0; offset /= 2)
+  for (int offset = hipBlockDim_x / 2; offset > 0; offset /= 2)
   {
     __syncthreads();
     if (local_i < offset)
@@ -202,7 +205,7 @@ __global__ void dot_kernel(hipLaunchParm lp, const T * a, const T * b, T * sum,
   }
 
   if (local_i == 0)
-    sum[blockIdx.x] = tb_sum[local_i];
+    sum[hipBlockIdx_x] = tb_sum[local_i];
 }
 
 template <class T>
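The buffer declared with HIP_DYNAMIC_SHARED is dynamically sized: its byte count must be supplied as the shared-memory argument of the kernel launch, which is not part of this diff. The standalone sketch below illustrates the same reduction pattern as dot_kernel; it is not repository code. It uses the current hipLaunchKernelGGL macro and CUDA-style index builtins rather than the hipLaunchParm-era interface seen above, and the names dot_demo, tbsize, and nblocks are illustrative. Error checking is omitted for brevity.

// Standalone illustration: a block-wise dot-product reduction using
// HIP_DYNAMIC_SHARED, showing where the per-block shared-memory size
// is passed at launch time.
#include <cstdio>
#include <vector>
#include "hip/hip_runtime.h"

__global__ void dot_demo(const float *a, const float *b, float *partial,
                         unsigned int n)
{
  // Dynamically sized per-block buffer; its byte count comes from the
  // fourth argument of hipLaunchKernelGGL below.
  HIP_DYNAMIC_SHARED(float, tb_sum);

  unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
  const unsigned int local_i = threadIdx.x;

  tb_sum[local_i] = 0.0f;
  for (; i < n; i += blockDim.x * gridDim.x)
    tb_sum[local_i] += a[i] * b[i];

  // Tree reduction within the block (block size must be a power of two).
  for (unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2)
  {
    __syncthreads();
    if (local_i < offset)
      tb_sum[local_i] += tb_sum[local_i + offset];
  }

  if (local_i == 0)
    partial[blockIdx.x] = tb_sum[0];
}

int main()
{
  const unsigned int n = 1u << 20;
  const unsigned int tbsize = 256, nblocks = 256;

  std::vector<float> a(n, 1.0f), b(n, 2.0f), partial(nblocks);

  float *d_a, *d_b, *d_partial;
  hipMalloc(reinterpret_cast<void **>(&d_a), n * sizeof(float));
  hipMalloc(reinterpret_cast<void **>(&d_b), n * sizeof(float));
  hipMalloc(reinterpret_cast<void **>(&d_partial), nblocks * sizeof(float));
  hipMemcpy(d_a, a.data(), n * sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(d_b, b.data(), n * sizeof(float), hipMemcpyHostToDevice);

  // The fourth argument (sizeof(float) * tbsize) is the dynamic shared
  // memory per block that backs HIP_DYNAMIC_SHARED inside the kernel.
  hipLaunchKernelGGL(dot_demo, dim3(nblocks), dim3(tbsize),
                     sizeof(float) * tbsize, 0, d_a, d_b, d_partial, n);

  hipMemcpy(partial.data(), d_partial, nblocks * sizeof(float),
            hipMemcpyDeviceToHost);

  double sum = 0.0;
  for (float p : partial) sum += p;
  std::printf("dot = %f (expected %f)\n", sum, 2.0 * n);

  hipFree(d_a); hipFree(d_b); hipFree(d_partial);
  return 0;
}

The per-block partial results are summed on the host, mirroring the sums / d_sum split that the destructor change above cleans up.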