40 template <
typename Float>
class vector {
51 std::shared_ptr<Float>
val;
90 vector(
const size_t N,
const Float value);
110 vector(
const std::initializer_list<Float> &list);
204 const std::uint32_t seed);
280 [[nodiscard]]
const Float *
data()
const {
return val.get(); }
288 [[nodiscard]] Float *
data() {
return val.get(); }
300 throw std::runtime_error(
"Error, GPU vector cant use resize");
303 std::shared_ptr<Float> tmp(
new Float[N], std::default_delete<Float[]>());
305 for (
size_t i = 0; i < copy_size; i++) {
306 tmp.get()[i] =
data()[i];
308 for (
size_t i = copy_size; i < N; i++) {
315 throw std::runtime_error(
"Error, not create vector cant use resize");
327 throw std::runtime_error(
"Error, GPU vector cant use push_back");
339 throw std::runtime_error(
"Error, not create vector cant use push_back");
353 [[nodiscard]]
const Float *
begin()
const {
return data(); }
369 [[nodiscard]]
const Float *
end()
const {
return data() +
size(); }
516 throw std::runtime_error(
"Error, GPU vector cant use operator[]");
532 bool compare_cpu_and_device =
false)
const;
545 bool compare_cpu_and_device =
false)
const;
558 bool compare_cpu_and_device =
false)
const;
571 bool compare_cpu_and_device =
false)
const;
vector(const view1D< matrix::Dense< Float >, Float > &vec)
copy from monolish::view1D from monolish::matrix::Dense
void send() const
send data to GPU
Float & operator[](size_t i)
reference to the element at position (v[i])
vector(const size_t N, const Float min, const Float max, const std::uint32_t seed)
create N length rand(min~max) vector with random seed
void operator=(const std::vector< Float > &vec)
copy vector from std::vector
void resize(size_t N, Float Val=0)
resize vector (only CPU)
vector(const std::vector< Float > &vec)
copy from std::vector
void operator=(const vector< Float > &vec)
copy vector; same as copy (copies the memory on both CPU and GPU)
void move(const tensor::tensor_Dense< Float > &tensor_dense, int N)
bool operator!=(const vector< Float > &vec) const
Comparing vectors (v != vec)
void move(const tensor::tensor_Dense< Float > &tensor_dense)
bool operator==(const view1D< vector< Float >, Float > &vec) const
Comparing vectors (v == vec)
void recv()
recv data from GPU, and free data on GPU
void operator=(const view1D< tensor::tensor_Dense< Float >, Float > &vec)
copy vector; same as copy (copies the memory on both CPU and GPU)
bool gpu_status
true: data has been sent to the GPU, false: not sent
const Float * data() const
returns a direct pointer to the vector
std::shared_ptr< Float > val
vector data (pointer)
void fill(Float value)
fill vector elements with a scalar value
const Float * end() const
returns an end iterator
void operator=(const view1D< matrix::Dense< Float >, Float > &vec)
copy vector; same as copy (copies the memory on both CPU and GPU)
vector(const std::initializer_list< Float > &list)
copy from initializer_list
bool operator==(const view1D< tensor::tensor_Dense< Float >, Float > &vec) const
Comparing vectors (v == vec)
size_t size() const
get vector size
bool operator==(const view1D< matrix::Dense< Float >, Float > &vec) const
Comparing vectors (v == vec)
bool operator!=(const view1D< tensor::tensor_Dense< Float >, Float > &vec) const
Comparing vectors (v != vec)
size_t get_offset() const
Float * end()
returns an end iterator
vector(const view1D< tensor::tensor_Dense< Float >, Float > &vec)
copy from monolish::view1D from monolish::tensor::tensor_Dense
bool val_create_flag
vector create flag
vector< Float > operator-()
Sign inversion.
vector(const size_t N)
allocate size N vector
vector(const vector< Float > &vec)
copy from monolish::vector
std::size_t val_nnz
vector size
bool equal(const view1D< tensor::tensor_Dense< Float >, Float > &vec, bool compare_cpu_and_device=false) const
Comparing vectors (v == vec)
vector(const Float *start, const Float *end)
copy from pointer
Float * begin()
returns a begin iterator
Float * data()
returns a direct pointer to the vector
void push_back(Float Val)
Add a new element at the end of the vector (only CPU)
void nonfree_recv()
recv data from GPU (w/o free)
vector(const size_t N, const Float min, const Float max)
create N length rand(min~max) vector
bool equal(const view1D< vector< Float >, Float > &vec, bool compare_cpu_and_device=false) const
Comparing vectors (v == vec)
void print_all(std::string filename) const
print all elements to file
~vector()
destructor of vector, free GPU memory
vector(const view1D< vector< Float >, Float > &vec)
copy from monolish::view1D from vector
bool operator==(const vector< Float > &vec) const
Comparing vectors (v == vec)
void print_all(bool force_cpu=false) const
print all elements to standard I/O
bool equal(const view1D< matrix::Dense< Float >, Float > &vec, bool compare_cpu_and_device=false) const
Comparing vectors (v == vec)
void operator=(const view1D< vector< Float >, Float > &vec)
copy vector; same as copy (copies the memory on both CPU and GPU)
const Float * begin() const
returns a begin iterator
bool get_device_mem_stat() const
true: data has been sent to the GPU, false: not sent
std::size_t alloc_nnz
alloced vector size
bool equal(const vector< Float > &vec, bool compare_cpu_and_device=false) const
Comparing vectors (v == vec)
bool operator!=(const view1D< vector< Float >, Float > &vec) const
Comparing vectors (v != vec)
void device_free() const
free data on GPU
bool operator!=(const view1D< matrix::Dense< Float >, Float > &vec) const
Comparing vectors (v != vec)
vector(const size_t N, const Float value)
initialize size N vector, value to fill the container
size_t get_nnz() const
get vector size
void max(const matrix::CRS< double > &A, const matrix::CRS< double > &B, matrix::CRS< double > &C)
Create a new CRS matrix with greatest elements of two matrices (C[0:nnz] = max(A[0:nnz],...
void min(const matrix::CRS< double > &A, const matrix::CRS< double > &B, matrix::CRS< double > &C)
Create a new CRS matrix with smallest elements of two matrices (C[0:nnz] = min(A[0:nnz],...