added printf style error messages to nail down memory access problems

This commit is contained in:
Peter Steinbach 2017-02-27 17:01:38 +01:00
parent 0fc6722684
commit f7af8ebc91

View File

@@ -78,6 +78,7 @@ HCStream<T>::~HCStream()
 template <class T>
 void HCStream<T>::init_arrays(T _a, T _b, T _c)
 {
+  std::cout << __FILE__ << ":" << __LINE__ << "\t" << "initializing arrays\n";
   std::vector<T> temp(array_size,_a);
   hc::copy(temp.begin(), temp.end(),this->d_a);
@@ -93,6 +94,7 @@ template <class T>
 void HCStream<T>::read_arrays(std::vector<T>& a, std::vector<T>& b, std::vector<T>& c)
 {
   // Copy device memory to host
+  std::cout << __FILE__ << ":" << __LINE__ << "\t" << "read arrays\n";
   hc::copy(d_a,a.begin());
   hc::copy(d_b,b.begin());
   hc::copy(d_c,c.begin());
@@ -103,6 +105,7 @@ template <class T>
 void HCStream<T>::copy()
 {
+  std::cout << __FILE__ << ":" << __LINE__ << "\t" << "copy\n";
   hc::array<T,1>& device_a = this->d_a;
   hc::array<T,1>& device_c = this->d_c;
@@ -123,6 +126,7 @@ void HCStream<T>::copy()
 template <class T>
 void HCStream<T>::mul()
 {
+  std::cout << __FILE__ << ":" << __LINE__ << "\t" << "mul\n";
   const T scalar = 0.3;
   hc::array<T,1>& device_b = this->d_b;
   hc::array<T,1>& device_c = this->d_c;
@@ -144,7 +148,7 @@ void HCStream<T>::mul()
 template <class T>
 void HCStream<T>::add()
 {
+  std::cout << __FILE__ << ":" << __LINE__ << "\t" << "add\n";
   hc::array<T,1>& device_a = this->d_a;
   hc::array<T,1>& device_b = this->d_b;
   hc::array<T,1>& device_c = this->d_c;
@@ -166,6 +170,7 @@ void HCStream<T>::add()
 template <class T>
 void HCStream<T>::triad()
 {
+  std::cout << __FILE__ << ":" << __LINE__ << "\t" << "triad\n";
   const T scalar = 0.3;
   hc::array<T,1>& device_a = this->d_a;
   hc::array<T,1>& device_b = this->d_b;
@@ -188,16 +193,17 @@ void HCStream<T>::triad()
 template <class T>
 T HCStream<T>::dot()
 {
-  hc::array<T,1>& device_a = this->d_a;
-  hc::array<T,1> product = this->d_b;
+  std::cout << __FILE__ << ":" << __LINE__ << "\t" << "dot\n";
+  hc::array_view<T,1> view_a(this->d_a);
+  hc::array_view<T,1> view_p(this->d_b);
   T sum = static_cast<T>(0);
   try{
     // launch a GPU kernel to compute the saxpy in parallel
-    hc::completion_future future_kernel = hc::parallel_for_each(hc::extent<1>(array_size)
-      , [&](hc::index<1> i) [[hc]] {
-      product[i] *= device_a[i];
+    hc::completion_future future_kernel = hc::parallel_for_each(view_a.get_extent(),
+      [&](hc::index<1> i) [[hc]] {
+      view_p[i] = view_p[i]*view_a[i];
     });
     future_kernel.wait();
   }
@@ -206,8 +212,11 @@ T HCStream<T>::dot()
     throw;
   }
+  std::cout << __FILE__ << ":" << __LINE__ << "\t" << "dot - for_each done\n";
   std::vector<T> h_product(array_size,sum);
-  hc::copy(product,h_product.begin());
+  hc::copy(view_p,h_product.begin());
+  std::cout << __FILE__ << ":" << __LINE__ << "\t" << "dot - copy-out done\n";
   sum = std::accumulate(h_product.begin(), h_product.end(),sum);