diff --git a/ACCStream.cpp b/ACCStream.cpp
index 664668d..9e0e3e7 100644
--- a/ACCStream.cpp
+++ b/ACCStream.cpp
@@ -120,6 +120,22 @@ void ACCStream<T>::triad()
   }
 }
 
+template <class T>
+void ACCStream<T>::nstream()
+{
+  const T scalar = startScalar;
+
+  int array_size = this->array_size;
+  T * restrict a = this->a;
+  T * restrict b = this->b;
+  T * restrict c = this->c;
+  #pragma acc parallel loop present(a[0:array_size], b[0:array_size], c[0:array_size]) wait
+  for (int i = 0; i < array_size; i++)
+  {
+    a[i] += b[i] + scalar * c[i];
+  }
+}
+
 template <class T>
 T ACCStream<T>::dot()
 {
diff --git a/ACCStream.h b/ACCStream.h
index 3d8695e..4cb9d25 100644
--- a/ACCStream.h
+++ b/ACCStream.h
@@ -35,6 +35,7 @@ class ACCStream : public Stream<T>
   virtual void add() override;
   virtual void mul() override;
   virtual void triad() override;
+  virtual void nstream() override;
   virtual T dot() override;
 
   virtual void init_arrays(T initA, T initB, T initC) override;
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 25eddff..8256774 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@ All notable changes to this project will be documented in this file.
 - Kokkos 3 build system (No code changes made).
 - SYCL build rules for ComputeCpp, DPCPP and HipSYCL.
 - Support for CUDA Managed Memory and Page Fault memory.
+- Added nstream kernel from PRK with associated command line option.
 
 ### Changed
 - Default branch renamed from `master` to `main`.
@@ -25,6 +26,7 @@
 - Reorder OpenCL objects in class so destructors are called in safe order.
 - Ensure all OpenCL kernels are present in destructor.
 - Unified run function in driver code to reduce code duplication, output should be unaffected.
+- Normalise sum result by expected value to help avoid false-negative validation errors.
 
 ### Removed
 - Pre-building of kernels in SYCL version to ensure compatibility with SYCL 1.2.1.
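The OpenACC version above and the per-backend implementations that follow all perform the same fused update: read `a`, `b` and `c`, write `a`. A minimal serial C++ sketch of the semantics (illustrative only, not part of the patch):

```cpp
#include <cstddef>
#include <vector>

// Serial reference for the PRK nstream kernel: a[i] += b[i] + scalar * c[i].
// One pass reads a, b and c and writes a, so it moves 4 * sizeof(T) * N bytes.
template <typename T>
void nstream_reference(std::vector<T>& a, const std::vector<T>& b,
                       const std::vector<T>& c, T scalar)
{
  for (std::size_t i = 0; i < a.size(); i++)
    a[i] += b[i] + scalar * c[i];
}
```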
diff --git a/CUDAStream.cu b/CUDAStream.cu
index 32aae49..b467d00 100644
--- a/CUDAStream.cu
+++ b/CUDAStream.cu
@@ -212,6 +212,23 @@ void CUDAStream<T>::triad()
   check_error();
 }
 
+template <typename T>
+__global__ void nstream_kernel(T * a, const T * b, const T * c)
+{
+  const T scalar = startScalar;
+  const int i = blockDim.x * blockIdx.x + threadIdx.x;
+  a[i] += b[i] + scalar * c[i];
+}
+
+template <class T>
+void CUDAStream<T>::nstream()
+{
+  nstream_kernel<<<array_size/TBSIZE, TBSIZE>>>(d_a, d_b, d_c);
+  check_error();
+  cudaDeviceSynchronize();
+  check_error();
+}
+
 template <typename T>
 __global__ void dot_kernel(const T * a, const T * b, T * sum, int array_size)
 {
diff --git a/CUDAStream.h b/CUDAStream.h
index df85802..83b8c66 100644
--- a/CUDAStream.h
+++ b/CUDAStream.h
@@ -50,6 +50,7 @@ class CUDAStream : public Stream<T>
   virtual void add() override;
   virtual void mul() override;
   virtual void triad() override;
+  virtual void nstream() override;
   virtual T dot() override;
 
   virtual void init_arrays(T initA, T initB, T initC) override;
diff --git a/HIPStream.cpp b/HIPStream.cpp
index d790ee5..fbc3b71 100644
--- a/HIPStream.cpp
+++ b/HIPStream.cpp
@@ -182,6 +182,23 @@ void HIPStream<T>::triad()
   check_error();
 }
 
+template <typename T>
+__global__ void nstream_kernel(T * a, const T * b, const T * c)
+{
+  const T scalar = startScalar;
+  const int i = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
+  a[i] += b[i] + scalar * c[i];
+}
+
+template <class T>
+void HIPStream<T>::nstream()
+{
+  hipLaunchKernelGGL(HIP_KERNEL_NAME(nstream_kernel<T>), dim3(array_size/TBSIZE), dim3(TBSIZE), 0, 0, d_a, d_b, d_c);
+  check_error();
+  hipDeviceSynchronize();
+  check_error();
+}
+
 template <typename T>
 __global__ void dot_kernel(const T * a, const T * b, T * sum, int array_size)
 {
diff --git a/HIPStream.h b/HIPStream.h
index fdab392..44a2893 100644
--- a/HIPStream.h
+++ b/HIPStream.h
@@ -41,6 +41,7 @@ class HIPStream : public Stream<T>
   virtual void add() override;
   virtual void mul() override;
   virtual void triad() override;
+  virtual void nstream() override;
   virtual T dot() override;
 
   virtual void init_arrays(T initA, T initB, T initC) override;
diff --git a/KokkosStream.cpp b/KokkosStream.cpp
index 09c1eaf..00efe92 100644
--- a/KokkosStream.cpp
+++ b/KokkosStream.cpp
@@ -119,6 +119,21 @@ void KokkosStream<T>::triad()
   Kokkos::fence();
 }
 
+template <class T>
+void KokkosStream<T>::nstream()
+{
+  Kokkos::View<T*> a(*d_a);
+  Kokkos::View<T*> b(*d_b);
+  Kokkos::View<T*> c(*d_c);
+
+  const T scalar = startScalar;
+  Kokkos::parallel_for(array_size, KOKKOS_LAMBDA (const long index)
+  {
+    a[index] += b[index] + scalar*c[index];
+  });
+  Kokkos::fence();
+}
+
 template <class T>
 T KokkosStream<T>::dot()
 {
diff --git a/KokkosStream.hpp b/KokkosStream.hpp
index e88622c..3aa7cf5 100644
--- a/KokkosStream.hpp
+++ b/KokkosStream.hpp
@@ -41,6 +41,7 @@ class KokkosStream : public Stream<T>
   virtual void add() override;
   virtual void mul() override;
   virtual void triad() override;
+  virtual void nstream() override;
   virtual T dot() override;
 
   virtual void init_arrays(T initA, T initB, T initC) override;
diff --git a/OCLStream.cpp b/OCLStream.cpp
index 6c88eda..be88ba9 100644
--- a/OCLStream.cpp
+++ b/OCLStream.cpp
@@ -61,6 +61,14 @@ std::string kernels{R"CLC(
     const size_t i = get_global_id(0);
     a[i] = b[i] + scalar * c[i];
   }
+  kernel void nstream(
+    global TYPE * restrict a,
+    global const TYPE * restrict b,
+    global const TYPE * restrict c)
+  {
+    const size_t i = get_global_id(0);
+    a[i] += b[i] + scalar * c[i];
+  }
 
   kernel void stream_dot(
     global const TYPE * restrict a,
@@ -157,6 +165,7 @@ OCLStream<T>::OCLStream(const int ARRAY_SIZE, const int device_index)
   mul_kernel = new cl::KernelFunctor<cl::Buffer, cl::Buffer>(program, "mul");
   add_kernel = new cl::KernelFunctor<cl::Buffer, cl::Buffer, cl::Buffer>(program, "add");
   triad_kernel = new cl::KernelFunctor<cl::Buffer, cl::Buffer, cl::Buffer>(program, "triad");
+  nstream_kernel = new cl::KernelFunctor<cl::Buffer, cl::Buffer, cl::Buffer>(program, "nstream");
   dot_kernel = new cl::KernelFunctor<cl::Buffer, cl::Buffer, cl::Buffer, cl::LocalSpaceArg, cl_int>(program, "stream_dot");
 
   array_size = ARRAY_SIZE;
@@ -186,6 +195,7 @@ OCLStream<T>::~OCLStream()
   delete mul_kernel;
   delete add_kernel;
   delete triad_kernel;
+  delete nstream_kernel;
   delete dot_kernel;
 
   devices.clear();
@@ -231,6 +241,16 @@ void OCLStream<T>::triad()
   queue.finish();
 }
 
+template <class T>
+void OCLStream<T>::nstream()
+{
+  (*nstream_kernel)(
+    cl::EnqueueArgs(queue, cl::NDRange(array_size)),
+    d_a, d_b, d_c
+  );
+  queue.finish();
+}
+
 template <class T>
 T OCLStream<T>::dot()
 {
diff --git a/OCLStream.h b/OCLStream.h
index 3085aca..bcdf9ac 100644
--- a/OCLStream.h
+++ b/OCLStream.h
@@ -47,6 +47,7 @@ class OCLStream : public Stream<T>
   cl::KernelFunctor<cl::Buffer, cl::Buffer> * mul_kernel;
   cl::KernelFunctor<cl::Buffer, cl::Buffer, cl::Buffer> *add_kernel;
   cl::KernelFunctor<cl::Buffer, cl::Buffer, cl::Buffer> *triad_kernel;
+  cl::KernelFunctor<cl::Buffer, cl::Buffer, cl::Buffer> *nstream_kernel;
   cl::KernelFunctor<cl::Buffer, cl::Buffer, cl::Buffer, cl::LocalSpaceArg, cl_int> *dot_kernel;
 
   // NDRange configuration for the dot kernel
@@ -62,6 +63,7 @@ class OCLStream : public Stream<T>
   virtual void add() override;
   virtual void mul() override;
   virtual void triad() override;
+  virtual void nstream() override;
   virtual T dot() override;
 
   virtual void init_arrays(T initA, T initB, T initC) override;
diff --git a/OMPStream.cpp b/OMPStream.cpp
index 6b2800d..8063987 100644
--- a/OMPStream.cpp
+++ b/OMPStream.cpp
@@ -191,6 +191,31 @@ void OMPStream<T>::triad()
 #endif
 }
 
+template <class T>
+void OMPStream<T>::nstream()
+{
+  const T scalar = startScalar;
+
+#ifdef OMP_TARGET_GPU
+  int array_size = this->array_size;
+  T *a = this->a;
+  T *b = this->b;
+  T *c = this->c;
+  #pragma omp target teams distribute parallel for simd
+#else
+  #pragma omp parallel for
+#endif
+  for (int i = 0; i < array_size; i++)
+  {
+    a[i] += b[i] + scalar * c[i];
+  }
+  #if defined(OMP_TARGET_GPU) && defined(_CRAYC)
+  // If using the Cray compiler, the kernels do not block, so this update forces
+  // a small copy to ensure blocking so that timing is correct
+  #pragma omp target update from(a[0:0])
+  #endif
+}
+
 template <class T>
 T OMPStream<T>::dot()
 {
diff --git a/OMPStream.h b/OMPStream.h
index 8c93986..5a5622f 100644
--- a/OMPStream.h
+++ b/OMPStream.h
@@ -36,6 +36,7 @@ class OMPStream : public Stream<T>
   virtual void add() override;
   virtual void mul() override;
   virtual void triad() override;
+  virtual void nstream() override;
   virtual T dot() override;
 
   virtual void init_arrays(T initA, T initB, T initC) override;
diff --git a/README.md b/README.md
index 2d9c8cd..25ba9ae 100644
--- a/README.md
+++ b/README.md
@@ -38,6 +38,15 @@ But this information is not typically available in real HPC codes today, where t
 BabelStream therefore provides a measure of what memory bandwidth performance can be attained (by a particular programming model) if you follow today's best parallel programming practice.
 
+BabelStream also includes the nstream kernel from the Parallel Research Kernels (PRK) project, available on [GitHub](https://github.com/ParRes/Kernels).
+Details about PRK can be found in the following references:
+
+> Van der Wijngaart, Rob F., and Timothy G. Mattson. The parallel research kernels. IEEE High Performance Extreme Computing Conference (HPEC). IEEE, 2014.
+
+> R. F. Van der Wijngaart, A. Kayi, J. R. Hammond, G. Jost, T. St. John, S. Sridharan, T. G. Mattson, J. Abercrombie, and J. Nelson. Comparing runtime systems with exascale ambitions using the Parallel Research Kernels. ISC 2016, [DOI: 10.1007/978-3-319-41321-1_17](https://doi.org/10.1007/978-3-319-41321-1_17).
+
+> Jeff R. Hammond and Timothy G. Mattson. Evaluating data parallelism in C++ using the Parallel Research Kernels. IWOCL 2019, [DOI: 10.1145/3318170.3318192](https://doi.org/10.1145/3318170.3318192).
+
 Website
 -------
diff --git a/STD20Stream.cpp b/STD20Stream.cpp
index 09f83d4..8290033 100644
--- a/STD20Stream.cpp
+++ b/STD20Stream.cpp
@@ -94,6 +94,20 @@ void STD20Stream<T>::triad()
   );
 }
 
+template <class T>
+void STD20Stream<T>::nstream()
+{
+  const T scalar = startScalar;
+
+  std::for_each_n(
+    std::execution::par_unseq,
+    std::views::iota(0).begin(), array_size,
+    [&] (int i) {
+      a[i] += b[i] + scalar * c[i];
+    }
+  );
+}
+
 template <class T>
 T STD20Stream<T>::dot()
 {
diff --git a/STD20Stream.hpp b/STD20Stream.hpp
index a8a3c4f..e5daa3c 100644
--- a/STD20Stream.hpp
+++ b/STD20Stream.hpp
@@ -33,6 +33,7 @@ class STD20Stream : public Stream<T>
   virtual void add() override;
   virtual void mul() override;
   virtual void triad() override;
+  virtual void nstream() override;
   virtual T dot() override;
 
   virtual void init_arrays(T initA, T initB, T initC) override;
diff --git a/STDStream.cpp b/STDStream.cpp
index cd966ae..30ad420 100644
--- a/STDStream.cpp
+++ b/STDStream.cpp
@@ -72,6 +72,17 @@ void STDStream<T>::triad()
   std::transform(exe_policy, b, b+array_size, c, a, [](T bi, T ci){ return bi+startScalar*ci; });
 }
 
+template <class T>
+void STDStream<T>::nstream()
+{
+  // a[i] += b[i] + scalar * c[i];
+  // Need to do in two stages with C++11 STL.
+  // 1: a[i] += b[i]
+  // 2: a[i] += scalar * c[i];
+  std::transform(exe_policy, a, a+array_size, b, a, [](T ai, T bi){ return ai + bi; });
+  std::transform(exe_policy, a, a+array_size, c, a, [](T ai, T ci){ return ai + startScalar*ci; });
+}
+
 template <class T>
 T STDStream<T>::dot()
 {
diff --git a/STDStream.h b/STDStream.h
index 538f857..9ff7800 100644
--- a/STDStream.h
+++ b/STDStream.h
@@ -31,6 +31,7 @@ class STDStream : public Stream<T>
   virtual void add() override;
   virtual void mul() override;
   virtual void triad() override;
+  virtual void nstream() override;
   virtual T dot() override;
 
   virtual void init_arrays(T initA, T initB, T initC) override;
diff --git a/SYCL.make b/SYCL.make
index 4326da5..58df8d0 100644
--- a/SYCL.make
+++ b/SYCL.make
@@ -33,66 +33,49 @@ Set ARCH to change device (defaulting to "").
 endef
 
-ifneq ($(COMPILER), DPCPP)
+ifeq ($(COMPILER), HIPSYCL)
+ifneq ($(TARGET), CPU)
 $(info $(arch_help))
 ARCH=
-
+endif
 endif
 endif
 
-SYCL_COMPUTECPP_SYCLFLAGS = $(shell $(SYCL_SDK_DIR)/bin/computecpp_info --dump-device-compiler-flags)
-SYCL_COMPUTECPP_SYCLFLAGS_AMD = $(SYCL_COMPUTECPP_SYCLFLAGS)
+SYCL_COMPUTECPP_SYCLFLAGS = $(shell $(SYCL_SDK_DIR)/bin/computecpp_info --dump-device-compiler-flags) -no-serial-memop -sycl-driver
 SYCL_COMPUTECPP_SYCLFLAGS_CPU = $(SYCL_COMPUTECPP_SYCLFLAGS)
+SYCL_COMPUTECPP_SYCLFLAGS_AMD = $(SYCL_COMPUTECPP_SYCLFLAGS)
 SYCL_COMPUTECPP_SYCLFLAGS_NVIDIA = $(SYCL_COMPUTECPP_SYCLFLAGS) -sycl-target ptx64
 SYCL_COMPUTECPP_SYCLCXX = $(SYCL_SDK_DIR)/bin/compute++
-SYCL_COMPUTECPP_FLAGS = -O3 --std=c++17
-SYCL_COMPUTECPP_LINK_FLAGS = -L$(SYCL_SDK_DIR)/lib -lComputeCpp -lOpenCL -Wl,--rpath=$(SYCL_SDK_DIR)/lib/
+SYCL_COMPUTECPP_FLAGS = -O3 -std=c++17
+SYCL_COMPUTECPP_LINK_FLAGS = -Wl,-rpath=$(SYCL_SDK_DIR)/lib/ $(SYCL_SDK_DIR)/lib/libComputeCpp.so -lOpenCL
 SYCL_COMPUTECPP_INCLUDE = -I$(SYCL_SDK_DIR)/include
-SYCL_COMPUTECPP_CXX = g++
-SYCL_COMPUTECPP_DEPS = SYCLStream.sycl
 
-SYCL_HIPSYCL_SYCLFLAGS_CPU = -O3 --std=c++17 --hipsycl-platform=cpu
-SYCL_HIPSYCL_SYCLFLAGS_AMD = -O3 --std=c++17 --hipsycl-platform=rocm --hipsycl-gpu-arch=$(ARCH)
-SYCL_HIPSYCL_SYCLFLAGS_NVIDIA = -O3 --std=c++17 --hipsycl-platform=cuda --hipsycl-gpu-arch=$(ARCH)
+SYCL_HIPSYCL_SYCLFLAGS_CPU = --hipsycl-platform=cpu
+SYCL_HIPSYCL_SYCLFLAGS_AMD = --hipsycl-platform=rocm --hipsycl-gpu-arch=$(ARCH)
+SYCL_HIPSYCL_SYCLFLAGS_NVIDIA = --hipsycl-platform=cuda --hipsycl-gpu-arch=$(ARCH)
 SYCL_HIPSYCL_SYCLCXX = $(SYCL_SDK_DIR)/bin/syclcc
-SYCL_HIPSYCL_FLAGS = $(SYCL_HIPSYCL_SYCLFLAGS_$(TARGET))
+SYCL_HIPSYCL_FLAGS = -O3 --std=c++17
 SYCL_HIPSYCL_LINK_FLAGS = -L$(SYCL_SDK_DIR)/lib -Wl,-rpath,$(SYCL_SDK_DIR)/lib
 SYCL_HIPSYCL_INCLUDE =
-SYCL_HIPSYCL_CXX = $(SYCL_HIPSYCL_SYCLCXX)
-SYCL_HIPSYCL_DEPS =
 
-SYCL_DPCPP_SYCLFLAGS_CPU = -O3 --std=c++17
-SYCL_DPCPP_SYCLFLAGS_NVIDIA = -O3 --std=c++17 -fsycl -fsycl-targets=nvptx64-nvidia-cuda-sycldevice -fsycl-unnamed-lambda
+SYCL_DPCPP_SYCLFLAGS_NVIDIA = -fsycl -fsycl-targets=nvptx64-nvidia-cuda-sycldevice -fsycl-unnamed-lambda
 SYCL_DPCPP_SYCLCXX = dpcpp
-SYCL_DPCPP_FLAGS = $(SYCL_DPCPP_SYCLFLAGS_CPU)
+SYCL_DPCPP_FLAGS = -O3 --std=c++17
 SYCL_DPCPP_LINK_FLAGS =
 SYCL_DPCPP_INCLUDE =
-SYCL_DPCPP_CXX = dpcpp
-SYCL_DPCPP_DEPS =
 
 SYCL_SYCLFLAGS = $(SYCL_$(COMPILER)_SYCLFLAGS_$(TARGET))
 SYCL_SYCLCXX = $(SYCL_$(COMPILER)_SYCLCXX)
-
 SYCL_FLAGS = $(SYCL_$(COMPILER)_FLAGS)
 SYCL_LINK_FLAGS = $(SYCL_$(COMPILER)_LINK_FLAGS)
 SYCL_INCLUDE = $(SYCL_$(COMPILER)_INCLUDE)
-SYCL_CXX = $(SYCL_$(COMPILER)_CXX)
-SYCL_DEPS = $(SYCL_$(COMPILER)_DEPS)
 
-sycl-stream: main.o SYCLStream.o $(SYCL_DEPS)
-	$(SYCL_CXX) $(SYCL_FLAGS) -DSYCL main.o SYCLStream.o $(EXTRA_FLAGS) $(SYCL_LINK_FLAGS) -o $@
-
-main.o: main.cpp
-	$(SYCL_CXX) $(SYCL_FLAGS) -DSYCL main.cpp -c $(SYCL_INCLUDE) $(EXTRA_FLAGS) -o $@
-
-SYCLStream.o: SYCLStream.cpp $(SYCL_DEPS)
-	$(SYCL_CXX) $(SYCL_FLAGS) -DSYCL SYCLStream.cpp -c $(SYCL_INCLUDE) $(EXTRA_FLAGS) -o $@
-
-SYCLStream.sycl: SYCLStream.cpp
-	$(SYCL_SYCLCXX) -DSYCL SYCLStream.cpp $(SYCL_SYCLFLAGS) -c $(SYCL_INCLUDE) -o $@
+# Only ComputeCpp generates .sycl files, which is awkward to deal with, so we compile everything together.
+sycl-stream: main.cpp SYCLStream.cpp
+	$(SYCL_SYCLCXX) $(SYCL_SYCLFLAGS) $(SYCL_FLAGS) $(SYCL_INCLUDE) -DSYCL $(EXTRA_FLAGS) $(SYCL_LINK_FLAGS) $^ -o $@
 
 .PHONY: clean
 clean:
-	rm -f sycl-stream SYCLStream.sycl main.o SYCLStream.o
+	rm -f sycl-stream
diff --git a/SYCLStream.cpp b/SYCLStream.cpp
index d766996..3eab481 100644
--- a/SYCLStream.cpp
+++ b/SYCLStream.cpp
@@ -127,6 +127,23 @@ void SYCLStream<T>::triad()
   queue->wait();
 }
 
+template <class T>
+void SYCLStream<T>::nstream()
+{
+  const T scalar = startScalar;
+  queue->submit([&](handler &cgh)
+  {
+    auto ka = d_a->template get_access<access::mode::read_write>(cgh);
+    auto kb = d_b->template get_access<access::mode::read>(cgh);
+    auto kc = d_c->template get_access<access::mode::read>(cgh);
+    cgh.parallel_for(range<1>{array_size}, [=](id<1> idx)
+    {
+      ka[idx] += kb[idx] + scalar * kc[idx];
+    });
+  });
+  queue->wait();
+}
+
 template <class T>
 T SYCLStream<T>::dot()
 {
diff --git a/SYCLStream.h b/SYCLStream.h
index 0c73594..cd8e39a 100644
--- a/SYCLStream.h
+++ b/SYCLStream.h
@@ -42,6 +42,7 @@ class SYCLStream : public Stream<T>
   virtual void add() override;
   virtual void mul() override;
   virtual void triad() override;
+  virtual void nstream() override;
   virtual T dot() override;
 
   virtual void init_arrays(T initA, T initB, T initC) override;
diff --git a/Stream.h b/Stream.h
index ff00a54..eb4ffd4 100644
--- a/Stream.h
+++ b/Stream.h
@@ -29,6 +29,7 @@ class Stream
   virtual void mul() = 0;
   virtual void add() = 0;
   virtual void triad() = 0;
+  virtual void nstream() = 0;
   virtual T dot() = 0;
 
   // Copy memory between host and device
diff --git a/main.cpp b/main.cpp
index 7eb2765..e78d7a1 100644
--- a/main.cpp
+++ b/main.cpp
@@ -58,13 +58,11 @@ void check_solution(const unsigned int ntimes, std::vector<T>& a, std::vector
 template <typename T>
 void run();
 
-template <typename T>
-void run_triad();
-
 // Options for running the benchmark:
 // - All 5 kernels (Copy, Add, Mul, Triad, Dot).
 // - Triad only.
-enum class Benchmark {All, Triad};
+// - Nstream only.
+enum class Benchmark {All, Triad, Nstream};
 
 // Selected run options.
 Benchmark selection = Benchmark::All;
@@ -166,6 +164,27 @@ std::vector<std::vector<double>> run_triad(Stream<T> *stream)
   return timings;
 }
 
+// Run the Nstream kernel
+template <typename T>
+std::vector<std::vector<double>> run_nstream(Stream<T> *stream)
+{
+  std::vector<std::vector<double>> timings(1);
+
+  // Declare timers
+  std::chrono::high_resolution_clock::time_point t1, t2;
+
+  // Run nstream in loop
+  for (int k = 0; k < num_times; k++) {
+    t1 = std::chrono::high_resolution_clock::now();
+    stream->nstream();
+    t2 = std::chrono::high_resolution_clock::now();
+    timings[0].push_back(std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count());
+  }
+
+  return timings;
+
+}
+
 // Generic run routine
 // Runs the kernel(s) and prints output.
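The hunks that follow feed these timings into the bandwidth table, crediting nstream with four arrays' worth of traffic per iteration (reads of `a`, `b` and `c` plus the write of `a`). A small sketch of that arithmetic (helper name is hypothetical, not part of the patch):

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical helper mirroring the driver's bandwidth calculation:
// best-case bandwidth = bytes moved per iteration / fastest iteration.
// nstream reads a, b and c and writes a, hence 4 * sizeof(T) * array_size.
template <typename T>
double nstream_mbytes_per_sec(const std::vector<double>& times, std::size_t array_size)
{
  const double bytes = 4.0 * sizeof(T) * static_cast<double>(array_size);
  const double fastest = *std::min_element(times.begin(), times.end());
  return 1.0E-6 * bytes / fastest; // MB/s with MB = 10^6, BabelStream's default
}
```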
@@ -275,6 +294,10 @@ void run()
       break;
     case Benchmark::Triad:
       timings = run_triad<T>(stream);
+      break;
+    case Benchmark::Nstream:
+      timings = run_nstream<T>(stream);
+      break;
   };
 
   // Check solutions
@@ -313,17 +336,26 @@ void run()
   }
 
-  if (selection == Benchmark::All)
+  if (selection == Benchmark::All || selection == Benchmark::Nstream)
   {
-    std::string labels[5] = {"Copy", "Mul", "Add", "Triad", "Dot"};
-    size_t sizes[5] = {
-      2 * sizeof(T) * ARRAY_SIZE,
-      2 * sizeof(T) * ARRAY_SIZE,
-      3 * sizeof(T) * ARRAY_SIZE,
-      3 * sizeof(T) * ARRAY_SIZE,
-      2 * sizeof(T) * ARRAY_SIZE
-    };
+    std::vector<std::string> labels;
+    std::vector<size_t> sizes;
+
+    if (selection == Benchmark::All)
+    {
+      labels = {"Copy", "Mul", "Add", "Triad", "Dot"};
+      sizes = {
+        2 * sizeof(T) * ARRAY_SIZE,
+        2 * sizeof(T) * ARRAY_SIZE,
+        3 * sizeof(T) * ARRAY_SIZE,
+        3 * sizeof(T) * ARRAY_SIZE,
+        2 * sizeof(T) * ARRAY_SIZE};
+    } else if (selection == Benchmark::Nstream)
+    {
+      labels = {"Nstream"};
+      sizes = {4 * sizeof(T) * ARRAY_SIZE };
+    }
 
     for (int i = 0; i < timings.size(); ++i)
     {
@@ -416,13 +448,19 @@ void check_solution(const unsigned int ntimes, std::vector<T>& a, std::vector
   for (unsigned int i = 0; i < ntimes; i++)
   {
     // Do STREAM!
-    if (! (selection == Benchmark::Triad))
+    if (selection == Benchmark::All)
     {
       goldC = goldA;
       goldB = scalar * goldC;
       goldC = goldA + goldB;
+      goldA = goldB + scalar * goldC;
+    } else if (selection == Benchmark::Triad)
+    {
+      goldA = goldB + scalar * goldC;
+    } else if (selection == Benchmark::Nstream)
+    {
+      goldA += goldB + scalar * goldC;
     }
-    goldA = goldB + scalar * goldC;
   }
 
   // Do the reduction
@@ -435,7 +473,7 @@ void check_solution(const unsigned int ntimes, std::vector<T>& a, std::vector
   errB /= b.size();
   double errC = std::accumulate(c.begin(), c.end(), 0.0, [&](double sum, const T val){ return sum + fabs(val - goldC); });
   errC /= c.size();
-  double errSum = fabs(sum - goldSum);
+  double errSum = fabs((sum - goldSum)/goldSum);
 
   double epsi = std::numeric_limits<T>::epsilon() * 100.0;
@@ -452,7 +490,7 @@ void check_solution(const unsigned int ntimes, std::vector<T>& a, std::vector
       << "Validation failed on c[]. Average error " << errC << std::endl;
 
   // Check sum to 8 decimal places
-  if (!(selection == Benchmark::Triad) && errSum > 1.0E-8)
+  if (selection == Benchmark::All && errSum > 1.0E-8)
     std::cerr
       << "Validation failed on sum. Error " << errSum << std::endl
       << std::setprecision(15)
@@ -523,6 +561,10 @@ void parseArguments(int argc, char *argv[])
     {
       selection = Benchmark::Triad;
     }
+    else if (!std::string("--nstream-only").compare(argv[i]))
+    {
+      selection = Benchmark::Nstream;
+    }
     else if (!std::string("--csv").compare(argv[i]))
     {
       output_as_csv = true;
@@ -544,6 +586,7 @@ void parseArguments(int argc, char *argv[])
       std::cout << "  -n --numtimes NUM    Run the test NUM times (NUM >= 2)" << std::endl;
       std::cout << "      --float          Use floats (rather than doubles)" << std::endl;
       std::cout << "      --triad-only     Only run triad" << std::endl;
+      std::cout << "      --nstream-only   Only run nstream" << std::endl;
      std::cout << "      --csv            Output as csv table" << std::endl;
       std::cout << "      --mibibytes      Use MiB=2^20 for bandwidth calculation (default MB=10^6)" << std::endl;
       std::cout << std::endl;
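A final note on the validation changes above: with `--nstream-only`, only `a` is ever written, accumulating the same increment each iteration, while `b` and `c` keep their initial values; the dot-product check applies only in the `All` case and is now normalised by the expected sum. A sketch of the gold-value recurrence that `check_solution` applies (names are illustrative; `scalar` corresponds to BabelStream's `startScalar`):

```cpp
#include <cmath>
#include <limits>

// Sketch of check_solution()'s model for --nstream-only: b and c are never
// written, so after ntimes iterations a[i] should equal
// initA + ntimes * (initB + scalar * initC), computed here by the same
// recurrence the kernels apply. All names are illustrative.
template <typename T>
bool nstream_gold_ok(T a_result, T initA, T initB, T initC, T scalar,
                     unsigned int ntimes)
{
  T goldA = initA;
  for (unsigned int i = 0; i < ntimes; i++)
    goldA += initB + scalar * initC;

  // Same tolerance style as the driver: 100x machine epsilon on the error
  return std::fabs(static_cast<double>(a_result - goldA))
         < std::numeric_limits<T>::epsilon() * 100.0;
}
```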