monolish 0.17.1
MONOlithic LInear equation Solvers for Highly-parallel architecture
monolish_tensor_coo.hpp
#pragma once
#include "monolish_matrix.hpp"
#include "monolish_tensor.hpp"
#include "monolish_vector.hpp"

namespace monolish {
template <typename Float> class vector;
template <typename TYPE, typename Float> class view1D;
namespace tensor {
template <typename Float> class tensor_Dense;
template <typename Float> class tensor_COO {
private:
  std::vector<size_t> shape;

  mutable bool gpu_status = false;

public:
  std::vector<std::vector<size_t>> index;

  std::shared_ptr<Float> val;

  size_t val_nnz = 0;

  std::size_t alloc_nnz = 0;

  bool val_create_flag = false;

  tensor_COO() : shape(), gpu_status(false), index(), val_nnz(0) {
    val_create_flag = true;
  }

  tensor_COO(const std::vector<size_t> &shape_)
      : shape(shape_), gpu_status(false), index(), val_nnz(0) {
    val_create_flag = true;
  }

  void convert(const tensor::tensor_Dense<Float> &tens);

  tensor_COO(const tensor::tensor_Dense<Float> &tens) {
    val_create_flag = true;
    convert(tens);
  }

  tensor_COO(const std::vector<size_t> &shape_,
             const std::vector<std::vector<size_t>> &index_,
             const Float *value);

  tensor_COO(const tensor_COO<Float> &coo);

  tensor_COO(const tensor_COO<Float> &coo, Float value);

  void print_all(bool force_cpu = false) const;

  void print_all(const std::string filename) const;

  // TODO
  [[nodiscard]] double get_data_size() const {
    return get_nnz() * sizeof(Float) / 1.0e+9;
  }

  [[nodiscard]] Float at(const std::vector<size_t> &pos) const;

  [[nodiscard]] Float at(const std::vector<size_t> &pos) {
    return static_cast<const tensor_COO *>(this)->at(pos);
  };

  void set_ptr(const std::vector<size_t> &shape,
               const std::vector<std::vector<size_t>> &index,
               const std::vector<Float> &v);

  void set_ptr(const std::vector<size_t> &shape,
               const std::vector<std::vector<size_t>> &index,
               const size_t vsize, const Float *v);

  [[nodiscard]] std::vector<size_t> get_shape() const { return shape; }

  [[nodiscard]] size_t get_nnz() const { return val_nnz; }

  void fill(Float value);

  void set_shape(const std::vector<size_t> &shape) { this->shape = shape; }

  [[nodiscard]] bool get_device_mem_stat() const { return gpu_status; }

  [[nodiscard]] const Float *data() const { return val.get(); }

  [[nodiscard]] Float *data() { return val.get(); }

  void resize(const size_t N, Float Val = 0) {
    if (get_device_mem_stat()) {
      throw std::runtime_error("Error, GPU matrix can't use resize");
    }
    if (val_create_flag) {
      std::shared_ptr<Float> tmp(new Float[N], std::default_delete<Float[]>());
      size_t copy_size = std::min(val_nnz, N);
      for (size_t i = 0; i < copy_size; ++i) {
        tmp.get()[i] = data()[i];
      }
      for (size_t i = copy_size; i < N; ++i) {
        tmp.get()[i] = Val;
      }
      val = tmp;
      alloc_nnz = N;
      val_nnz = N;

      index.resize(N);
    } else {
      throw std::runtime_error("Error, not-created vector can't use resize");
    }
  }

  [[nodiscard]] std::string type() const { return "tensor_COO"; }

  void diag(vector<Float> &vec) const;
  void diag(view1D<vector<Float>, Float> &vec) const;
  void diag(view1D<matrix::Dense<Float>, Float> &vec) const;
  void diag(view1D<tensor::tensor_Dense<Float>, Float> &vec) const;

  void operator=(const tensor_COO<Float> &tens);

  [[nodiscard]] Float &operator[](size_t i) {
    if (get_device_mem_stat()) {
      throw std::runtime_error("Error, GPU vector can't use operator[]");
    }
    return data()[i];
  }

  [[nodiscard]] bool equal(const tensor_COO<Float> &tens,
                           bool compare_cpu_and_device = false) const;

  [[nodiscard]] bool operator==(const tensor_COO<Float> &tens) const;

  [[nodiscard]] bool operator!=(const tensor_COO<Float> &tens) const;

  size_t get_index(const std::vector<size_t> &pos) {
    if (pos.size() != this->shape.size()) {
      throw std::runtime_error("pos size must be the same as the shape size");
    }
    size_t ind = 0;
    for (size_t i = 0; i < pos.size(); ++i) {
      ind *= this->shape[i];
      ind += pos[i];
    }
    return ind;
  }

  std::vector<size_t> get_index(const size_t pos) {
    std::vector<size_t> ind(this->shape.size(), 0);
    auto pos_copy = pos;
    for (int i = (int)this->shape.size() - 1; i >= 0; --i) {
      ind[i] = pos_copy % this->shape[i];
      pos_copy /= this->shape[i];
    }
    return ind;
  }

  void insert(const std::vector<size_t> &pos, const Float val);

private:
  void _q_sort(int lo, int hi);

public:
  void sort(bool merge);
};
} // namespace tensor
} // namespace monolish
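The listing above only declares the tensor_COO interface; members such as insert(), sort(), at(), and convert() are defined in the monolish library itself. The following is a minimal usage sketch, not taken from the monolish documentation: it assumes the library is installed and linked, and that insert(), sort(), and at() behave as their brief descriptions below state.

#include "monolish_tensor_coo.hpp"

#include <iostream>

int main() {
  // 2 x 3 x 4 sparse tensor in coordinate (COO) format; no non-zero entries yet
  monolish::tensor::tensor_COO<double> A({2, 3, 4});

  // insert values at multi-dimensional positions
  A.insert({0, 0, 0}, 1.0);
  A.insert({1, 2, 3}, 2.0);

  // sort entries by index (merge = true is also expected to merge duplicate positions)
  A.sort(true);

  std::cout << A.type() << ", nnz = " << A.get_nnz() << std::endl;
  std::cout << "A[1][2][3] = " << A.at({1, 2, 3}) << std::endl;
  return 0;
}

Because COO stores one (index, value) pair per non-zero element, insertion is cheap; sort(true) is used here to order the entries and merge duplicate positions before reading values back.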
void convert(const tensor::tensor_Dense< Float > &tens)
Create tensor_COO tensor from tensor_Dense tensor.
void diag(view1D< tensor::tensor_Dense< Float >, Float > &vec) const
bool operator!=(const tensor_COO< Float > &tens) const
Comparing tensors (A != tens)
std::size_t alloc_nnz
allocated tensor size
void resize(const size_t N, Float Val=0)
resize tensor value
void diag(vector< Float > &vec) const
get diag. vector
void sort(bool merge)
sort tensor_COO tensor elements (and merge elements)
std::string type() const
get format name "tensor_COO"
void _q_sort(int lo, int hi)
tensor_COO(const std::vector< size_t > &shape_)
Initialize tensor_COO tensor.
void set_ptr(const std::vector< size_t > &shape, const std::vector< std::vector< size_t >> &index, const std::vector< Float > &v)
Set tensor_COO array from std::vector.
const Float * data() const
returns a direct pointer to the tensor
bool get_device_mem_stat() const
true: sent, false: not sent
size_t get_index(const std::vector< size_t > &pos)
get aligned index from vector index (A[pos[0]][pos[1]]... = A[ind]); see the worked example at the end of this page
std::vector< size_t > get_index(const size_t pos)
get vector index from aligned index (A[pos] = A[ind[0]][ind[1]]...)
void print_all(bool force_cpu=false) const
print all elements to standard I/O
std::vector< size_t > shape
shape
std::shared_ptr< Float > val
Coordinate format value array (pointer), which stores the values of the non-zero elements.
tensor_COO(const std::vector< size_t > &shape_, const std::vector< std::vector< size_t >> &index_, const Float *value)
Create tensor_COO tensor from n-origin array.
void print_all(const std::string filename) const
print all elements to file
std::vector< std::vector< size_t > > index
Coordinate format index, which stores the index numbers of the non-zero elements (size nnz)
Float * data()
returns a direct pointer to the tensor
size_t get_nnz() const
get # of non-zeros
bool operator==(const tensor_COO< Float > &tens) const
Comparing tensors (A == tens)
std::vector< size_t > get_shape() const
get shape
bool equal(const tensor_COO< Float > &tens, bool compare_cpu_and_device=false) const
Comparing tensors (A == tens)
size_t val_nnz
# of non-zero elements
bool val_create_flag
tensor create flag
Float & operator[](size_t i)
reference to the element at position (v[i])
void operator=(const tensor_COO< Float > &tens)
tensor copy
void set_shape(const std::vector< size_t > &shape)
Set shape.
tensor_COO(const tensor_COO< Float > &coo, Float value)
Initialize tensor_COO tensor of the same size as input tensor.
Float at(const std::vector< size_t > &pos)
get element A[pos[0]][pos[1]]... (only CPU)
bool gpu_status
true: sent, false: not sent
void set_ptr(const std::vector< size_t > &shape, const std::vector< std::vector< size_t >> &index, const size_t vsize, const Float *v)
Set tensor_COO array from array.
void fill(Float value)
fill tensor elements with a scalar value
void diag(view1D< matrix::Dense< Float >, Float > &vec) const
Float at(const std::vector< size_t > &pos) const
get element A[pos[0]][pos[1]]...
void diag(view1D< vector< Float >, Float > &vec) const
tensor_COO(const tensor::tensor_Dense< Float > &tens)
Create tensor_COO tensor from tensor_Dense tensor.
tensor_COO(const tensor_COO< Float > &coo)
Create tensor_COO tensor from tensor_COO tensor.
void insert(const std::vector< size_t > &pos, const Float val)
insert element A[pos[0]][pos[1]]...
double get_data_size() const
Memory data space required by the tensor (in GB).
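As a worked example of the two get_index() overloads shown in the listing: the mapping is row-major, so for shape {2, 3, 4} the position {1, 2, 3} corresponds to the aligned index (1 * 3 + 2) * 4 + 3 = 23. The sketch below is illustrative only and assumes the monolish headers are on the include path.

#include "monolish_tensor_coo.hpp"

#include <cassert>
#include <vector>

int main() {
  monolish::tensor::tensor_COO<double> A({2, 3, 4});

  // multi-dimensional position -> aligned (row-major) index
  size_t flat = A.get_index({1, 2, 3}); // (1 * 3 + 2) * 4 + 3 = 23
  assert(flat == 23);

  // aligned index -> multi-dimensional position
  std::vector<size_t> pos = A.get_index(static_cast<size_t>(23));
  assert((pos == std::vector<size_t>{1, 2, 3}));
  return 0;
}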