monolish
0.14.2
MONOlithic LInear equation Solvers for Highly-parallel architecture
void Init(int argc, char **argv);
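For orientation, here is a minimal lifecycle sketch built only from the members documented on this page (get_instance, Init, get_rank, get_size, Finalize); the header name monolish_mpi.hpp and the fully qualified name monolish::mpi::comm are assumptions about the installation, not taken from this listing.

    #include <monolish_mpi.hpp>  // assumed header name

    int main(int argc, char **argv) {
      // singleton access; monolish::mpi::comm is the assumed qualified name
      monolish::mpi::comm &comm = monolish::mpi::comm::get_instance();
      comm.Init(argc, argv);             // initialize the MPI execution environment
      const int rank = comm.get_rank();  // my rank number
      const int size = comm.get_size();  // number of processes
      (void)rank; (void)size;            // communication and computation would go here
      comm.Finalize();                   // terminate the MPI execution environment
      return 0;
    }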
void Send(double val, int dst, int tag) const;
void Send(float val, int dst, int tag) const;
void Send(int val, int dst, int tag) const;
void Send(size_t val, int dst, int tag) const;
void Send(std::vector<double> &vec, int dst, int tag) const;
void Send(std::vector<float> &vec, int dst, int tag) const;
void Send(std::vector<int> &vec, int dst, int tag) const;
void Send(std::vector<size_t> &vec, int dst, int tag) const;
MPI_Status Recv(std::vector<double> &vec, int src, int tag) const;
MPI_Status Recv(std::vector<size_t> &vec, int src, int tag) const;
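A hedged sketch of a blocking point-to-point transfer with the Send/Recv overloads above; the helper name exchange_demo is illustrative only, and the sketch assumes an already-initialized communicator and that the monolish MPI header pulls in the MPI_Status type.

    #include <monolish_mpi.hpp>  // assumed header name
    #include <vector>

    void exchange_demo(monolish::mpi::comm &comm) {  // hypothetical helper
      std::vector<double> vec(100, 1.0);
      const int tag = 0;
      if (comm.get_rank() == 0) {
        comm.Send(vec, 1, tag);                    // blocking send to rank 1
      } else if (comm.get_rank() == 1) {
        MPI_Status stat = comm.Recv(vec, 0, tag);  // blocking recv from rank 0
        (void)stat;
      }
    }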
void Isend(double val, int dst, int tag);
void Isend(float val, int dst, int tag);
void Isend(int val, int dst, int tag);
void Isend(size_t val, int dst, int tag);
void Isend(const std::vector<double> &vec, int dst, int tag);
void Isend(const std::vector<float> &vec, int dst, int tag);
void Isend(const std::vector<int> &vec, int dst, int tag);
void Isend(const std::vector<size_t> &vec, int dst, int tag);
void Irecv(double val, int src, int tag);
void Irecv(float val, int src, int tag);
void Irecv(int val, int src, int tag);
void Irecv(size_t val, int src, int tag);
void Irecv(std::vector<double> &vec, int src, int tag);
void Irecv(std::vector<float> &vec, int src, int tag);
void Irecv(std::vector<int> &vec, int src, int tag);
void Irecv(std::vector<size_t> &vec, int src, int tag);
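The nonblocking members pair with Waitall(), documented below as waiting for all communications to complete; a hedged ring-exchange sketch follows (the helper name ring_demo is illustrative and an initialized communicator is assumed).

    #include <monolish_mpi.hpp>  // assumed header name
    #include <vector>

    void ring_demo(monolish::mpi::comm &comm) {  // hypothetical helper
      const int rank = comm.get_rank();
      const int size = comm.get_size();
      std::vector<double> sendbuf(100, 2.0), recvbuf(100);
      const int right = (rank + 1) % size;
      const int left = (rank + size - 1) % size;
      comm.Isend(sendbuf, right, 0);  // nonblocking send; request stored internally
      comm.Irecv(recvbuf, left, 0);   // nonblocking recv; request stored internally
      comm.Waitall();                 // complete all stored requests
    }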
[[nodiscard]] double Allreduce(double val) const;
[[nodiscard]] float Allreduce(float val) const;
[[nodiscard]] int Allreduce(int val) const;
[[nodiscard]] size_t Allreduce(size_t val) const;
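Allreduce combines a scalar across all processes with MPI_SUM, and the Allreduce_sum/_prod/_max/_min variants documented below select the other operators; a minimal hedged sketch (the helper name reduce_demo is illustrative):

    #include <monolish_mpi.hpp>  // assumed header name

    double reduce_demo(monolish::mpi::comm &comm) {  // hypothetical helper
      const double local = static_cast<double>(comm.get_rank());
      const double sum = comm.Allreduce(local);      // MPI_SUM over all ranks
      const double mx = comm.Allreduce_max(local);   // MPI_MAX over all ranks
      const double mn = comm.Allreduce_min(local);   // MPI_MIN over all ranks
      return sum + mx + mn;                          // use the [[nodiscard]] results
    }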
void Bcast(double &val, int root) const;
void Bcast(float &val, int root) const;
void Bcast(int &val, int root) const;
void Bcast(size_t &val, int root) const;
void Bcast(std::vector<double> &vec, int root) const;
void Bcast(std::vector<float> &vec, int root) const;
void Bcast(std::vector<int> &vec, int root) const;
void Bcast(std::vector<size_t> &vec, int root) const;
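Bcast replicates the root's value (or vector) on every process; a hedged sketch assuming every rank sizes the buffer identically (the helper name bcast_demo is illustrative):

    #include <monolish_mpi.hpp>  // assumed header name
    #include <vector>

    void bcast_demo(monolish::mpi::comm &comm) {  // hypothetical helper
      // only root 0 holds the real data before the call; sizes match on all ranks
      std::vector<double> buf(10, comm.get_rank() == 0 ? 3.14 : 0.0);
      comm.Bcast(buf, 0);  // afterwards every rank holds root's values
    }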
void Gather(std::vector<double> &sendvec, std::vector<double> &recvvec, int root) const;
void Gather(std::vector<float> &sendvec, std::vector<float> &recvvec, int root) const;
void Gather(std::vector<int> &sendvec, std::vector<int> &recvvec, int root) const;
void Gather(std::vector<size_t> &sendvec, std::vector<size_t> &recvvec, int root) const;
void Scatter(std::vector<double> &sendvec, std::vector<double> &recvvec, int root) const;
void Scatter(std::vector<float> &sendvec, std::vector<float> &recvvec, int root) const;
void Scatter(std::vector<int> &sendvec, std::vector<int> &recvvec, int root) const;
void Scatter(std::vector<size_t> &sendvec, std::vector<size_t> &recvvec, int root) const;
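Per the member descriptions below, Scatter divides the root's vector evenly among processes and Gather collects the pieces back; a hedged sketch assuming sendvec.size() is divisible by get_size() (the helper name scatter_gather_demo is illustrative):

    #include <monolish_mpi.hpp>  // assumed header name
    #include <vector>

    void scatter_gather_demo(monolish::mpi::comm &comm) {  // hypothetical helper
      const int nprocs = comm.get_size();
      std::vector<double> all(nprocs * 4, 1.0);  // evenly divisible among ranks
      std::vector<double> part(4), back(nprocs * 4);
      comm.Scatter(all, part, 0);         // each rank receives its slice from root 0
      for (auto &x : part) { x *= 2.0; }  // local work on the slice
      comm.Gather(part, back, 0);         // root 0 collects the slices into back
    }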
void Barrier() const
Blocks until all processes in the communicator have reached this routine.
int get_size()
get the number of processes
double Allreduce_prod(double val) const
MPI_Allreduce (MPI_PROD) for scalar. Combines values from all processes and distributes the result back to all processes.
void Waitall()
Waits for all communications to complete.
void Gather(monolish::vector< double > &sendvec, monolish::vector< double > &recvvec, int root) const
MPI_Gather, Gathers vector from all processes. The data is evenly divided and transmitted to each process.
void Irecv(double val, int src, int tag)
MPI_Irecv for scalar. Performs a nonblocking recv.
MPI_Comm my_comm
MPI communicator, MPI_COMM_WORLD.
double Allreduce(double val) const
MPI_Allreduce (MPI_SUM) for scalar. Combines values from all processes and distributes the result back to all processes.
double Allreduce_min(double val) const
MPI_Allreduce (MPI_MIN) for scalar. Combines values from all processes and distributes the result back to all processes.
double Allreduce_max(double val) const
MPI_Allreduce (MPI_MAX) for scalar. Combines values from all processes and distributes the result back to all processes.
void Isend(double val, int dst, int tag)
MPI_Isend for scalar. Performs a nonblocking send. Requests are stored internally and completed by Waitall().
struct ompi_communicator_t * MPI_Comm
static comm & get_instance()
bool Initialized() const
Indicates whether MPI_Init has been called.
C++ template MPI class. Functions of this class do nothing when MPI is disabled. Functions in this class ...
void Send(double val, int dst, int tag) const
MPI_Send for scalar. Performs a blocking send.
void Bcast(double &val, int root) const
MPI_Bcast, Broadcasts a message from the process with rank root to all other processes.
double Allreduce_sum(double val) const
MPI_Allreduce (MPI_SUM) for scalar. Combines values from all processes and distributes the result back to all processes.
int get_rank()
get my rank number
MPI_Comm get_comm() const
get communicator
comm & operator=(const comm &)=delete
void set_comm(MPI_Comm external_comm)
set communicator
void Scatter(monolish::vector< double > &sendvec, monolish::vector< double > &recvvec, int root) const
MPI_Scatter, Sends data from one task to all tasks. The data is evenly divided and transmitted to each process.
std::vector< MPI_Request > requests
void Finalize()
Terminates MPI execution environment.
void Init()
Initialize the MPI execution environment.
MPI_Status Recv(double val, int src, int tag) const
MPI_Recv for scalar. Performs a blocking recv.
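Finally, a hedged sketch of communicator management with the singleton, using only members documented above (Initialized, Init, set_comm, Barrier, get_comm, Finalize); the helper name is illustrative, and it is assumed that MPI_COMM_WORLD and MPI_Comm are visible through the monolish MPI header.

    #include <monolish_mpi.hpp>  // assumed header name; MPI types expected to come with it

    void comm_management_demo() {  // hypothetical helper
      monolish::mpi::comm &comm = monolish::mpi::comm::get_instance();
      if (!comm.Initialized()) {
        comm.Init();                   // no-argument initialization
      }
      comm.set_comm(MPI_COMM_WORLD);   // attach the default world communicator
      comm.Barrier();                  // synchronize all processes
      MPI_Comm raw = comm.get_comm();  // raw handle for interoperating with plain MPI
      (void)raw;
      comm.Finalize();                 // terminate the MPI execution environment
    }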