Fixed memory management for GPU, now working with OpenMP and CUDA
parent 57189e7ca5
commit 1a60f130eb
@@ -16,21 +16,36 @@ RAJAStream<T>::RAJAStream(const unsigned int ARRAY_SIZE, const int device_index)
 {
   RangeSegment seg(0, ARRAY_SIZE);
   index_set.push_back(seg);
+
+#ifdef RAJA_TARGET_CPU
+  d_a = new T[ARRAY_SIZE];
+  d_b = new T[ARRAY_SIZE];
+  d_c = new T[ARRAY_SIZE];
+#else
   cudaMallocManaged((void**)&d_a, sizeof(T)*ARRAY_SIZE, cudaMemAttachGlobal);
   cudaMallocManaged((void**)&d_b, sizeof(T)*ARRAY_SIZE, cudaMemAttachGlobal);
   cudaMallocManaged((void**)&d_c, sizeof(T)*ARRAY_SIZE, cudaMemAttachGlobal);
+  cudaDeviceSynchronize();
+#endif
 }
 
 template <class T>
 RAJAStream<T>::~RAJAStream()
 {
+#ifdef RAJA_TARGET_CPU
+  delete[] d_a;
+  delete[] d_b;
+  delete[] d_c;
+#else
   cudaFree(d_a);
   cudaFree(d_b);
   cudaFree(d_c);
+#endif
 }
 
 template <class T>
-void RAJAStream<T>::write_arrays(const std::vector<T>& a, const std::vector<T>& b, const std::vector<T>& c)
+void RAJAStream<T>::write_arrays(
+    const std::vector<T>& a, const std::vector<T>& b, const std::vector<T>& c)
 {
   std::copy(a.begin(), a.end(), d_a);
   std::copy(b.begin(), b.end(), d_b);
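The constructor now chooses its allocator at compile time: plain new[] for the OpenMP build, cudaMallocManaged for the CUDA build. Managed (unified) memory yields a single pointer valid on both host and device, which is what lets write_arrays and read_arrays drive the arrays with std::copy instead of cudaMemcpy; the cudaDeviceSynchronize() call ensures the device is quiescent before any host access. A minimal standalone sketch of the same pattern outside RAJA (the array size, element type, and printout are illustrative, not from this commit):

    #include <cuda_runtime.h>
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main()
    {
      const size_t N = 1024;
      double* d_a;
      // One allocation, visible to both host and device code.
      cudaMallocManaged((void**)&d_a, sizeof(double)*N, cudaMemAttachGlobal);

      std::vector<double> init(N, 1.0);
      std::copy(init.begin(), init.end(), d_a);  // plain host-side copy, no cudaMemcpy

      // ... launch kernels that read and write d_a here ...

      cudaDeviceSynchronize();  // let device work finish before the host reads
      std::printf("a[0] = %g\n", d_a[0]);
      cudaFree(d_a);
      return 0;
    }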
@@ -38,48 +53,59 @@ void RAJAStream<T>::write_arrays(const std::vector<T>&
 }
 
 template <class T>
-void RAJAStream<T>::read_arrays(std::vector<T>& a, std::vector<T>& b, std::vector<T>& c)
+void RAJAStream<T>::read_arrays(
+    std::vector<T>& a, std::vector<T>& b, std::vector<T>& c)
 {
-  std::copy(d_a, d_a + array_size - 1, a.data());
-  std::copy(d_b, d_b + array_size - 1, b.data());
-  std::copy(d_c, d_c + array_size - 1, c.data());
+  std::copy(d_a, d_a + array_size, a.data());
+  std::copy(d_b, d_b + array_size, b.data());
+  std::copy(d_c, d_c + array_size, c.data());
 }
 
 template <class T>
 void RAJAStream<T>::copy()
 {
+  T* a = d_a;
+  T* c = d_c;
   forall<policy>(index_set, [=] RAJA_DEVICE (int index)
   {
-    d_c[index] = d_a[index];
+    c[index] = a[index];
   });
 }
 
 template <class T>
 void RAJAStream<T>::mul()
 {
+  T* b = d_b;
+  T* c = d_c;
   const T scalar = 3.0;
   forall<policy>(index_set, [=] RAJA_DEVICE (int index)
   {
-    d_b[index] = scalar*d_c[index];
+    b[index] = scalar*c[index];
   });
 }
 
 template <class T>
 void RAJAStream<T>::add()
 {
+  T* a = d_a;
+  T* b = d_b;
+  T* c = d_c;
   forall<policy>(index_set, [=] RAJA_DEVICE (int index)
   {
-    d_c[index] = d_a[index] + d_b[index];
+    c[index] = a[index] + b[index];
   });
 }
 
 template <class T>
 void RAJAStream<T>::triad()
 {
+  T* a = d_a;
+  T* b = d_b;
+  T* c = d_c;
   const T scalar = 3.0;
   forall<policy>(index_set, [=] RAJA_DEVICE (int index)
   {
-    d_a[index] = d_b[index] + scalar*d_c[index];
+    a[index] = b[index] + scalar*c[index];
   });
 }
 
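Two distinct bugs are fixed in the hunk above. First, read_arrays was dropping the last element of every array: std::copy(first, last, out) copies the half-open range [first, last), so the correct past-the-end iterator is d_a + array_size, not d_a + array_size - 1. Second, each kernel now copies the member pointers into locals before the forall. Inside a [=] lambda, naming a data member such as d_a captures this (a host pointer), not the member itself, and dereferencing this in device code is invalid; copying into a local first makes the lambda capture the raw device-accessible pointer by value. A plain-C++ sketch of that capture pitfall (apply() stands in for RAJA::forall, and the struct is illustrative, not the one in this commit):

    #include <cstddef>

    template <class F>
    void apply(std::size_t n, F f)  // stand-in for RAJA::forall
    {
      for (std::size_t i = 0; i < n; ++i) f(i);
    }

    struct Stream
    {
      double* d_a;     // device-accessible pointer
      std::size_t n;

      void zero()
      {
        double* a = d_a;  // local copy: [=] now captures the pointer by value
        apply(n, [=](std::size_t i) { a[i] = 0.0; });
        // Writing d_a[i] in the lambda body instead would capture `this`;
        // on a real device that host pointer cannot be dereferenced.
      }
    };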
@@ -14,15 +14,15 @@
 
 #define IMPLEMENTATION_STRING "RAJA"
 
-#ifdef RAJA_USE_CUDA
-const size_t block_size = 128;
-typedef RAJA::IndexSet::ExecPolicy<
-    RAJA::seq_segit,
-    RAJA::cuda_exec_async<block_size>> policy;
-#else
+#ifdef RAJA_TARGET_CPU
 typedef RAJA::IndexSet::ExecPolicy<
     RAJA::seq_segit,
     RAJA::omp_parallel_for_exec> policy;
+#else
+const size_t block_size = 128;
+typedef RAJA::IndexSet::ExecPolicy<
+    RAJA::seq_segit,
+    RAJA::cuda_exec<block_size>> policy;
 #endif
 
 template <class T>
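The final hunk, in the header, makes RAJA_TARGET_CPU the single compile-time switch (matching the allocation code above) and swaps cuda_exec_async for cuda_exec. An IndexSet::ExecPolicy pairs an outer policy for walking the index set's segments with an inner policy for executing each segment; cuda_exec, unlike cuda_exec_async, synchronizes before returning, so host-side reads such as read_arrays see completed results. A sketch of how the typedef is consumed, assuming the RAJA 0.x-era IndexSet API used in this file (the scale() function and its arguments are illustrative):

    #include "RAJA/RAJA.hxx"

    const size_t block_size = 128;
    typedef RAJA::IndexSet::ExecPolicy<
        RAJA::seq_segit,                      // iterate segments sequentially
        RAJA::cuda_exec<block_size>> policy;  // run each segment as a CUDA kernel

    void scale(const RAJA::IndexSet& index_set, double* b, const double* c)
    {
      // The lambda captures only raw pointers by value, as in the kernels above.
      RAJA::forall<policy>(index_set, [=] RAJA_DEVICE (int i)
      {
        b[i] = 3.0 * c[i];
      });
    }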