template <typename F1> double Dnrm1_core(const F1 &x) {
  double ans = 0;
  const double *xd = x.data();
  size_t size = x.size();
  const size_t xoffset = x.get_offset();

  if (x.get_device_mem_stat() == true) {
#if USE_GPU
    // device data: accumulate |x[i]| on the GPU via OpenMP target offloading
#pragma omp target teams distribute parallel for reduction(+ : ans) map(tofrom : ans)
    for (size_t i = 0; i < size; i++) {
      ans += std::abs(xd[i + xoffset]);
    }
#else
    throw std::runtime_error(
        "error USE_GPU is false, but get_device_mem_stat() == true");
#endif
  } else {
    // host data: accumulate |x[i]| with an OpenMP reduction on the CPU
#pragma omp parallel for reduction(+ : ans)
    for (size_t i = 0; i < size; i++) {
      ans += std::abs(xd[i + xoffset]);
    }
  }

  return ans;
}

template <typename F1> float Snrm1_core(const F1 &x) {
  float ans = 0;
  const float *xd = x.data();
  size_t size = x.size();
  const size_t xoffset = x.get_offset();

  if (x.get_device_mem_stat() == true) {
#if USE_GPU
    // device data: accumulate |x[i]| on the GPU via OpenMP target offloading
#pragma omp target teams distribute parallel for reduction(+ : ans) map(tofrom : ans)
    for (size_t i = 0; i < size; i++) {
      ans += std::abs(xd[i + xoffset]);
    }
#else
    throw std::runtime_error(
        "error USE_GPU is false, but get_device_mem_stat() == true");
#endif
  } else {
    // host data: accumulate |x[i]| with an OpenMP reduction on the CPU
#pragma omp parallel for reduction(+ : ans)
    for (size_t i = 0; i < size; i++) {
      ans += std::abs(xd[i + xoffset]);
    }
  }

  return ans;
}
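
// Usage sketch (illustrative assumption, not part of the original source).
// Dnrm1_core/Snrm1_core accept any container type F1 that provides data(),
// size(), get_offset(), and get_device_mem_stat(). The minimal host-only
// adapter below is hypothetical and only shows how the kernels are driven;
// real callers would pass the library's own vector/view types instead.
struct example_host_view {
  const double *ptr; // raw host pointer
  size_t len;        // number of elements

  const double *data() const { return ptr; }
  size_t size() const { return len; }
  size_t get_offset() const { return 0; }            // no view offset
  bool get_device_mem_stat() const { return false; } // data lives on the host
};

inline double example_nrm1() {
  const double v[] = {1.0, -2.0, 3.0};
  example_host_view x{v, 3};
  return Dnrm1_core(x); // CPU path: |1| + |-2| + |3| = 6.0
}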