monolish  0.17.3-dev.23
MONOlithic LInear equation Solvers for Highly-parallel architecture
monolish_view1D.hpp
Go to the documentation of this file.
1 #pragma once
2 #include "./monolish_logger.hpp"
3 #include <cassert>
4 #include <exception>
5 #include <fstream>
6 #include <iostream>
7 #include <iterator>
8 #include <memory>
9 #include <omp.h>
10 #include <stdexcept>
11 #include <string>
12 #include <vector>
13 
14 #if USE_SXAT
15 #undef _HAS_CPP17
16 #endif
17 #include <random>
18 #if USE_SXAT
19 #define _HAS_CPP17 1
20 #endif
21 
22 #if defined USE_MPI
23 #include <mpi.h>
24 #endif
25 
26 namespace monolish {
27 template <typename Float> class vector;
28 
29 namespace matrix {
30 template <typename Float> class Dense;
31 template <typename Float> class CRS;
32 template <typename Float> class LinearOperator;
33 } // namespace matrix
34 
35 namespace tensor {
36 template <typename Float> class tensor_Dense;
37 } // namespace tensor
38 
50 template <typename TYPE, typename Float> class view1D {
51 private:
52  TYPE &target;
53  Float *target_data;
54  size_t first;
55  size_t last;
56  size_t range;
57 
58 public:
69  view1D(vector<Float> &x, const size_t start, const size_t size) : target(x) {
70  first = start;
71  range = size;
72  last = start + range;
73  target_data = x.data();
74  }
75 
86  view1D(matrix::Dense<Float> &A, const size_t start, const size_t size)
87  : target(A) {
88  first = start;
89  range = size;
90  last = start + range;
91  target_data = A.data();
92  }
93 
104  view1D(tensor::tensor_Dense<Float> &A, const size_t start, const size_t size)
105  : target(A) {
106  first = start;
107  range = size;
108  last = start + range;
109  target_data = A.data();
110  }
111 
122  view1D(view1D<vector<Float>, Float> &x, const size_t start, const size_t size)
123  : target(x) {
124  first = x.get_first() + start;
125  range = size;
126  last = first + range;
127  target_data = x.data();
128  }
129 
140  view1D(view1D<matrix::Dense<Float>, Float> &x, const size_t start,
141  const size_t size)
142  : target(x) {
143  first = x.get_first() + start;
144  range = size;
145  last = first + range;
146  target_data = x.data();
147  }
148 
159  view1D(view1D<tensor::tensor_Dense<Float>, Float> &x, const size_t start,
160  const size_t size)
161  : target(x) {
162  first = x.get_first() + start;
163  range = size;
164  last = first + range;
165  target_data = x.data();
166  }
167 
175  [[nodiscard]] std::string type() const {
176  return "view1D(" + target.type() + ")";
177  }
178 
179  // communication
180  // ///////////////////////////////////////////////////////////////////////////
188  void send() const { target.send(); };
189 
197  void recv() { target.recv(); };
198 
206  [[nodiscard]] std::shared_ptr<Float> get_val() { return target.get_val(); }
207 
214  [[nodiscard]] size_t size() const { return range; }
215 
222  [[nodiscard]] size_t get_nnz() const { return range; }
223 
230  [[nodiscard]] size_t get_first() const { return first; }
231 
238  [[nodiscard]] size_t get_last() const { return last; }
239 
246  [[nodiscard]] size_t get_offset() const { return first; }
247 
248  [[nodiscard]] std::shared_ptr<Float> get_val() const { return target.val; }
249 
250  [[nodiscard]] size_t get_alloc_nnz() const { return target.alloc_nnz; }
251 
257  void set_first(size_t i) { first = i; }
258 
264  void set_last(size_t i) {
265  assert(first + i <= target.get_nnz());
266  last = i;
267  }
268 
277  [[nodiscard]] size_t get_device_mem_stat() const {
278  return target.get_device_mem_stat();
279  }
280 
288  [[nodiscard]] Float *data() const { return target_data; }
289 
296  [[nodiscard]] Float *data() { return target_data; }
297 
304  [[nodiscard]] Float *begin() const { return target_data + get_offset(); }
305 
312  [[nodiscard]] Float *begin() { return target_data + get_offset(); }
313 
320  [[nodiscard]] Float *end() const { return target_data + range; }
321 
328  [[nodiscard]] Float *end() { return target_data + range; }
329 
338  void fill(Float value);
339 
348  void print_all(bool force_cpu = false) const;
349 
360  void resize(size_t N) {
361  assert(first + N <= target.get_nnz());
362  range = N;
363  last = first + range;
364  }
365 
366  // operator
367  // ///////////////////////////////////////////////////////////////////////////
368 
381  void operator=(const vector<Float> &vec);
382 
395  void operator=(const view1D<vector<Float>, Float> &vec);
396 
409  void operator=(const view1D<matrix::Dense<Float>, Float> &vec);
410 
423  void operator=(const view1D<tensor::tensor_Dense<Float>, Float> &vec);
424 
434  void operator=(const std::vector<Float> &vec);
435 
445  [[nodiscard]] Float &operator[](const size_t i) {
446  if (target.get_device_mem_stat()) {
447  throw std::runtime_error("Error, GPU vector cant use operator[]");
448  }
449  return target_data[i + first];
450  }
451 };
454 } // namespace monolish
Dense format Matrix.
const Float * data() const
returns a direct pointer to the matrix
const Float * data() const
returns a direct pointer to the tensor
const Float * data() const
returns a direct pointer to the vector
view1D(tensor::tensor_Dense< Float > &A, const size_t start, const size_t size)
create view1D(start:start+range) from Dense tensor
void operator=(const view1D< tensor::tensor_Dense< Float >, Float > &vec)
copy vector; this is the same as copy (copies the memory on both CPU and GPU)
view1D(vector< Float > &x, const size_t start, const size_t size)
create view1D(start:start+range) from vector
void recv()
recv data from GPU, and free data on GPU
size_t get_alloc_nnz() const
size_t get_last() const
get end position
Float * data()
returns a direct pointer to the vector (does not include the offset)
void operator=(const view1D< matrix::Dense< Float >, Float > &vec)
copy vector; this is the same as copy (copies the memory on both CPU and GPU)
Float * end() const
returns a end iterator
view1D(view1D< vector< Float >, Float > &x, const size_t start, const size_t size)
create view1D(start:start+range) from monolish::vector
Float * begin()
returns begin iterator (include offset)
std::shared_ptr< Float > get_val() const
void operator=(const view1D< vector< Float >, Float > &vec)
copy vector; this is the same as copy (copies the memory on both CPU and GPU)
void fill(Float value)
fill vector elements with a scalar value
Float * data() const
returns a direct pointer to the original vector (does not include the offset)
void print_all(bool force_cpu=false) const
print all elements to standard I/O
size_t get_device_mem_stat() const
true: sended, false: not send
void operator=(const std::vector< Float > &vec)
copy vector from std::vector
view1D(view1D< tensor::tensor_Dense< Float >, Float > &x, const size_t start, const size_t size)
create view1D(start:start+range) from monolish::tensor::tensor_Dense
view1D(matrix::Dense< Float > &A, const size_t start, const size_t size)
create view1D(start:start+range) from Dense matrix
size_t get_nnz() const
get view1D size (same as size())
Float & operator[](const size_t i)
reference to the element at position
void operator=(const vector< Float > &vec)
copy vector; this is the same as copy (copies the memory on both CPU and GPU)
void set_last(size_t i)
change last position
size_t get_first() const
get first position
std::shared_ptr< Float > get_val()
get shared_ptr of val
size_t size() const
get view1D size (range)
Float * end()
returns a end iterator
void set_first(size_t i)
change first position
size_t get_offset() const
get first position (same as get_first())
view1D(view1D< matrix::Dense< Float >, Float > &x, const size_t start, const size_t size)
create view1D(start:start+range) from monolish::matrix::Dense
void send() const
send data to GPU
Float * begin() const
returns begin iterator (include offset)
std::string type() const
get format name "view1D"
void resize(size_t N)
change last position
monolish namespaces