monolish  0.17.3-dev.16
MONOlithic LInear equation Solvers for Highly-parallel architecture
monolish_view1D.hpp
#pragma once
#include "./monolish_logger.hpp"
#include <cassert>
#include <exception>
#include <fstream>
#include <iostream>
#include <iterator>
#include <memory>
#include <omp.h>
#include <stdexcept>
#include <string>
#include <vector>

#if USE_SXAT
#undef _HAS_CPP17
#endif
#include <random>
#if USE_SXAT
#define _HAS_CPP17 1
#endif

#if defined USE_MPI
#include <mpi.h>
#endif

namespace monolish {
template <typename Float> class vector;

namespace matrix {
template <typename Float> class Dense;
template <typename Float> class CRS;
template <typename Float> class LinearOperator;
} // namespace matrix

namespace tensor {
template <typename Float> class tensor_Dense;
} // namespace tensor

template <typename TYPE, typename Float> class view1D {
private:
  TYPE &target;
  Float *target_data;
  size_t first;
  size_t last;
  size_t range;

public:
  view1D(vector<Float> &x, const size_t start, const size_t size) : target(x) {
    first = start;
    range = size;
    last = start + range;
    target_data = x.data();
  }

  view1D(matrix::Dense<Float> &A, const size_t start, const size_t size)
      : target(A) {
    first = start;
    range = size;
    last = start + range;
    target_data = A.data();
  }

  view1D(tensor::tensor_Dense<Float> &A, const size_t start, const size_t size)
      : target(A) {
    first = start;
    range = size;
    last = start + range;
    target_data = A.data();
  }

  view1D(view1D<vector<Float>, Float> &x, const size_t start, const size_t size)
      : target(x) {
    first = x.get_first() + start;
    range = size;
    last = first + range;
    target_data = x.data();
  }

  view1D(view1D<matrix::Dense<Float>, Float> &x, const size_t start,
         const size_t size)
      : target(x) {
    first = x.get_first() + start;
    range = size;
    last = first + range;
    target_data = x.data();
  }

  view1D(view1D<tensor::tensor_Dense<Float>, Float> &x, const size_t start,
         const size_t size)
      : target(x) {
    first = x.get_first() + start;
    range = size;
    last = first + range;
    target_data = x.data();
  }

  [[nodiscard]] std::string type() const {
    return "view1D(" + target.type() + ")";
  }

  // communication
  // ///////////////////////////////////////////////////////////////////////////
  void send() const { target.send(); };

  void recv() { target.recv(); };

  [[nodiscard]] size_t size() const { return range; }

  [[nodiscard]] size_t get_nnz() const { return range; }

  [[nodiscard]] size_t get_first() const { return first; }

  [[nodiscard]] size_t get_last() const { return last; }

  [[nodiscard]] size_t get_offset() const { return first; }

  [[nodiscard]] std::shared_ptr<Float> get_val() const { return target.val; }

  [[nodiscard]] size_t get_alloc_nnz() const { return target.alloc_nnz; }

  void set_first(size_t i) { first = i; }

  void set_last(size_t i) {
    assert(first + i <= target.get_nnz());
    last = i;
  }

  [[nodiscard]] size_t get_device_mem_stat() const {
    return target.get_device_mem_stat();
  }

  [[nodiscard]] Float *data() const { return target_data; }

  [[nodiscard]] Float *data() { return target_data; }

  [[nodiscard]] Float *begin() const { return target_data + get_offset(); }

  [[nodiscard]] Float *begin() { return target_data + get_offset(); }

  [[nodiscard]] Float *end() const { return target_data + range; }

  [[nodiscard]] Float *end() { return target_data + range; }

  void fill(Float value);

  void print_all(bool force_cpu = false) const;

  void resize(size_t N) {
    assert(first + N <= target.get_nnz());
    range = N;
    last = first + range;
  }

  // operator
  // ///////////////////////////////////////////////////////////////////////////

  void operator=(const vector<Float> &vec);

  void operator=(const view1D<vector<Float>, Float> &vec);

  void operator=(const view1D<matrix::Dense<Float>, Float> &vec);

  void operator=(const view1D<tensor::tensor_Dense<Float>, Float> &vec);

  void operator=(const std::vector<Float> &vec);

  [[nodiscard]] Float &operator[](const size_t i) {
    if (target.get_device_mem_stat()) {
      throw std::runtime_error("Error, GPU vector cant use operator[]");
    }
    return target_data[i + first];
  }
};
} // namespace monolish
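As a quick orientation, the following is a minimal usage sketch (not taken from the library's documentation): it creates a monolish::vector, wraps a sub-range in a view1D, and uses only members declared in the class above (fill, print_all, operator[]). The top-level include monolish_blas.hpp is an assumption about how this header is normally pulled in.

```cpp
#include <monolish_blas.hpp> // assumed top-level header that provides vector and view1D

int main() {
  // A length-10 vector filled with 0.0
  monolish::vector<double> x(10, 0.0);

  // View of x[2], ..., x[6] (offset 2, length 5)
  monolish::view1D<monolish::vector<double>, double> v(x, 2, 5);

  v.fill(1.0); // fills only the viewed elements x[2]..x[6]
  v[0] = 42.0; // element 0 of the view aliases x[2]

  v.print_all(); // prints the 5 viewed elements
  x.print_all(); // the underlying vector shows the modified sub-range

  return 0;
}
```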
Brief descriptions of the members and referenced symbols in the listing above:

view1D(vector< Float > &x, const size_t start, const size_t size)
create view1D(start:start+range) from vector
view1D(matrix::Dense< Float > &A, const size_t start, const size_t size)
create view1D(start:start+range) from Dense matrix
view1D(tensor::tensor_Dense< Float > &A, const size_t start, const size_t size)
create view1D(start:start+range) from Dense tensor
view1D(view1D< vector< Float >, Float > &x, const size_t start, const size_t size)
create view1D(start:start+range) from a view1D of monolish::vector
view1D(view1D< matrix::Dense< Float >, Float > &x, const size_t start, const size_t size)
create view1D(start:start+range) from a view1D of monolish::matrix::Dense
view1D(view1D< tensor::tensor_Dense< Float >, Float > &x, const size_t start, const size_t size)
create view1D(start:start+range) from a view1D of monolish::tensor::tensor_Dense
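A hedged sketch of the constructor variants above: a view over one row of a Dense matrix (view1D addresses the matrix as flat storage, assuming monolish's row-major layout) and a view of a view, whose offset is accumulated via get_first(). The Dense(M, N, value) constructor and the monolish_blas.hpp include are assumptions about the surrounding monolish API, not something this header declares.

```cpp
#include <monolish_blas.hpp> // assumed top-level header

int main() {
  // 4x6 dense matrix, all elements 0.0 (assumed Dense(M, N, value) constructor)
  monolish::matrix::Dense<double> A(4, 6, 0.0);

  // view1D sees the matrix as one flat array of 24 values;
  // with row-major storage, row 2 starts at offset 2*6 = 12 and has 6 elements
  monolish::view1D<monolish::matrix::Dense<double>, double> row2(A, 2 * 6, 6);

  // A view of a view: elements 1..4 of row2.
  // Its first position becomes row2.get_first() + 1 = 13 in A's flat storage.
  monolish::view1D<monolish::view1D<monolish::matrix::Dense<double>, double>,
                   double>
      mid(row2, 1, 4);

  row2.fill(3.0);
  mid.fill(7.0); // overwrites the middle 4 entries of row 2

  A.print_all();
  return 0;
}
```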
std::string type() const
get format name "view1D"
void send() const
send data to GPU
void recv()
receive data from the GPU, and free the data on the GPU
size_t get_device_mem_stat() const
true: data has been sent to the GPU, false: not sent
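send(), recv(), and get_device_mem_stat() only forward to the target container, so GPU residency is managed through the owner of the data, not per view. A minimal sketch, assuming a GPU-enabled monolish build and the same assumed monolish_blas.hpp include:

```cpp
#include <monolish_blas.hpp> // assumed top-level header

int main() {
  monolish::vector<double> x(100, 1.0);
  monolish::view1D<monolish::vector<double>, double> v(x, 10, 20);

  v.send(); // forwards to x.send(): the whole vector x is copied to the GPU

  // get_device_mem_stat() also forwards to the target,
  // so it reports the owner's residency, not a per-view state
  if (v.get_device_mem_stat()) {
    // ... device-side work (e.g. monolish BLAS calls) would go here ...
  }

  v.recv(); // forwards to x.recv(): copies back to the CPU and frees the GPU copy
  return 0;
}
```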
size_t size() const
get view1D size (range)
size_t get_nnz() const
get view1D size (same as size())
size_t get_first() const
get first position
size_t get_last() const
get end position
size_t get_offset() const
get first position (same as get_first())
std::shared_ptr< Float > get_val() const
get the shared_ptr that owns the target's data (target.val)
size_t get_alloc_nnz() const
get the allocated size of the target (target.alloc_nnz)
void set_first(size_t i)
change first position
void set_last(size_t i)
change last position
void resize(size_t N)
change last position
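The position getters and setters above are plain bookkeeping over (first, last, range), as the inline definitions in the listing show. The sketch below just checks those relations with asserts; note that resize() keeps first fixed, moves last, and asserts against the target's get_nnz().

```cpp
#include <monolish_blas.hpp> // assumed top-level header
#include <cassert>

int main() {
  monolish::vector<double> x(100, 0.0);
  monolish::view1D<monolish::vector<double>, double> v(x, 30, 40); // window [30, 70)

  assert(v.get_first() == 30);
  assert(v.get_offset() == 30); // same as get_first()
  assert(v.size() == 40);
  assert(v.get_nnz() == 40);    // same as size()
  assert(v.get_last() == 70);   // first + range

  v.resize(50); // keep first, extend the window to [30, 80)
  assert(v.size() == 50 && v.get_last() == 80);

  return 0;
}
```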
Float * data()
returns a direct pointer to the vector (does not include the offset)
Float * data() const
returns a direct pointer to the original vector (does not include the offset)
Float * begin()
returns begin iterator (includes the offset)
Float * begin() const
returns begin iterator (includes the offset)
Float * end()
returns an end iterator
Float * end() const
returns an end iterator
Float & operator[](const size_t i)
reference to the element at position i
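The pointer accessors differ in whether the offset is applied: data() returns the target's raw pointer with no offset, begin() adds get_offset(), and operator[] indexes relative to the view. Note that end(), as written in the listing, returns target_data + range without adding first, so the raw loop in this hedged sketch iterates with data(), get_offset(), and size() instead of begin()/end().

```cpp
#include <monolish_blas.hpp> // assumed top-level header
#include <cassert>

int main() {
  monolish::vector<double> x(10, 0.0);
  monolish::view1D<monolish::vector<double>, double> v(x, 2, 5); // x[2]..x[6]

  assert(v.data() == x.data());                   // no offset applied
  assert(v.begin() == x.data() + v.get_offset()); // offset applied

  // operator[] is relative to the view: v[i] aliases x[i + first]
  v[0] = 1.0;
  assert(x[2] == 1.0);

  // Raw loop over the viewed elements via the unoffset pointer:
  double *p = v.data();
  for (size_t i = 0; i < v.size(); ++i) {
    p[v.get_offset() + i] = 7.0;
  }
  return 0;
}
```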
const Float * monolish::vector::data() const
returns a direct pointer to the vector
const Float * monolish::matrix::Dense::data() const
returns a direct pointer to the matrix
const Float * monolish::tensor::tensor_Dense::data() const
returns a direct pointer to the tensor
monolish::matrix::Dense< Float >
Dense format matrix
monolish
monolish namespaces
void fill(Float value)
fill vector elements with a scalar value
void print_all(bool force_cpu=false) const
print all elements to standard I/O
void operator=(const vector< Float > &vec)
copy vector; same as copy (copies memory on both CPU and GPU)
void operator=(const view1D< vector< Float >, Float > &vec)
copy vector; same as copy (copies memory on both CPU and GPU)
void operator=(const view1D< matrix::Dense< Float >, Float > &vec)
copy vector; same as copy (copies memory on both CPU and GPU)
void operator=(const view1D< tensor::tensor_Dense< Float >, Float > &vec)
copy vector; same as copy (copies memory on both CPU and GPU)
void operator=(const std::vector< Float > &vec)
copy vector from std::vector
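A short sketch of the assignment operators listed above: copying from a std::vector and from another view. The requirement that both sides have the same size() is an assumption based on how the copies are described, not something this header states.

```cpp
#include <monolish_blas.hpp> // assumed top-level header
#include <vector>

int main() {
  monolish::vector<double> x(10, 0.0);
  monolish::vector<double> y(10, 2.0);

  monolish::view1D<monolish::vector<double>, double> vx(x, 0, 5);
  monolish::view1D<monolish::vector<double>, double> vy(y, 5, 5);

  vx = std::vector<double>{1.0, 2.0, 3.0, 4.0, 5.0}; // copy from std::vector
  vy = vx;                                           // copy view to view (same length assumed)

  vx.fill(0.0);  // overwrite the viewed part of x with a scalar
  y.print_all(); // y[5..9] now holds the values copied from vx before the fill

  return 0;
}
```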