monolish  0.14.2
MONOlithic LInear equation Solvers for Highly-parallel architecture
monolish_mpi.cpp
Go to the documentation of this file.
1 // This code is generated by gen_vector_blas.sh
2 #include "../internal/monolish_internal.hpp"
3 #include "Bcast.hpp"
4 #include "Gather.hpp"
5 #include "Scatter.hpp"
6 #include "allreduce.hpp"
7 #include "isend_irecv.hpp"
8 #include "send_recv.hpp"
9 
10 namespace monolish::mpi {
11 
12 void comm::set_comm(MPI_Comm external_comm) {
13  Logger &logger = Logger::get_instance();
14  logger.util_in(monolish_func);
15  my_comm = external_comm;
16  logger.util_out();
17 }
18 
19 void comm::Init() {
20  Logger &logger = Logger::get_instance();
21  logger.util_in(monolish_func);
22 #if defined MONOLISH_USE_MPI
23  assert(MPI_Init(nullptr, nullptr) == MPI_SUCCESS);
24  if (get_comm() == 0) {
25  set_comm(MPI_COMM_WORLD);
26  }
27 #endif
28  logger.util_out();
29 }
30 
31 void comm::Init(int argc, char **argv) {
32  Logger &logger = Logger::get_instance();
33  logger.util_in(monolish_func);
34 #if defined MONOLISH_USE_MPI
35  assert(MPI_Init(&argc, &argv) == MPI_SUCCESS);
36  if (get_comm() == 0) {
37  set_comm(MPI_COMM_WORLD);
38  }
39 #endif
40  logger.util_out();
41 }
42 
43 bool comm::Initialized() const {
44  Logger &logger = Logger::get_instance();
45  logger.util_in(monolish_func);
46 #if defined MONOLISH_USE_MPI
47  int flag;
48  assert(MPI_Initialized(&flag) == MPI_SUCCESS);
49  logger.util_out();
50  return flag != 0;
51 #endif
52  logger.util_out();
53  return true;
54 }
55 
56 void comm::Finalize() {
57  Logger &logger = Logger::get_instance();
58  logger.util_in(monolish_func);
59 #if defined MONOLISH_USE_MPI
60  assert(MPI_Finalize() == MPI_SUCCESS);
61 #endif
62  logger.util_out();
63 }
64 
66 
67 int comm::get_rank() {
68  Logger &logger = Logger::get_instance();
69  logger.util_in(monolish_func);
70  int rank = 0;
71 #if defined MONOLISH_USE_MPI
72  MPI_Comm_rank(get_comm(), &rank);
73  logger.util_out();
74  return rank;
75 #endif
76  logger.util_out();
77  return rank;
78 }
79 
80 int comm::get_size() {
81  Logger &logger = Logger::get_instance();
82  logger.util_in(monolish_func);
83  int size = 1;
84 #if defined MONOLISH_USE_MPI
85  MPI_Comm_size(get_comm(), &size);
86  logger.util_out();
87  return size;
88 #endif
89  logger.util_out();
90  return size;
91 }
92 
94 
95 void comm::Barrier() const {
96  Logger &logger = Logger::get_instance();
97  logger.util_in(monolish_func);
98 #if defined MONOLISH_USE_MPI
99  MPI_Barrier(get_comm());
100  logger.util_out();
101 #endif
102  logger.util_out();
103 }
104 
105 void comm::Send(double val, int dst, int tag) const {
106  Send_core(val, dst, tag, get_comm());
107 }
108 void comm::Send(float val, int dst, int tag) const {
109  Send_core(val, dst, tag, get_comm());
110 }
111 void comm::Send(int val, int dst, int tag) const {
112  Send_core(val, dst, tag, get_comm());
113 }
114 void comm::Send(size_t val, int dst, int tag) const {
115  Send_core(val, dst, tag, get_comm());
116 }
117 
118 void comm::Send(std::vector<double> &vec, int dst, int tag) const {
119  Send_core(vec, dst, tag, get_comm());
120 }
121 void comm::Send(std::vector<float> &vec, int dst, int tag) const {
122  Send_core(vec, dst, tag, get_comm());
123 }
124 void comm::Send(std::vector<int> &vec, int dst, int tag) const {
125  Send_core(vec, dst, tag, get_comm());
126 }
127 void comm::Send(std::vector<size_t> &vec, int dst, int tag) const {
128  Send_core(vec, dst, tag, get_comm());
129 }
130 
131 void comm::Send(monolish::vector<double> &vec, int dst, int tag) const {
132  Send_core(vec, dst, tag, get_comm());
133 }
134 void comm::Send(monolish::vector<float> &vec, int dst, int tag) const {
135  Send_core(vec, dst, tag, get_comm());
136 }
137 
138 MPI_Status comm::Recv(double val, int src, int tag) const {
139  return Recv_core(val, src, tag, get_comm());
140 }
141 MPI_Status comm::Recv(float val, int src, int tag) const {
142  return Recv_core(val, src, tag, get_comm());
143 }
144 MPI_Status comm::Recv(int val, int src, int tag) const {
145  return Recv_core(val, src, tag, get_comm());
146 }
147 MPI_Status comm::Recv(size_t val, int src, int tag) const {
148  return Recv_core(val, src, tag, get_comm());
149 }
150 
151 MPI_Status comm::Recv(std::vector<double> &vec, int src, int tag) const {
152  return Recv_core(vec, src, tag, get_comm());
153 }
154 MPI_Status comm::Recv(std::vector<float> &vec, int src, int tag) const {
155  return Recv_core(vec, src, tag, get_comm());
156 }
157 MPI_Status comm::Recv(std::vector<int> &vec, int src, int tag) const {
158  return Recv_core(vec, src, tag, get_comm());
159 }
160 MPI_Status comm::Recv(std::vector<size_t> &vec, int src, int tag) const {
161  return Recv_core(vec, src, tag, get_comm());
162 }
163 
164 MPI_Status comm::Recv(monolish::vector<double> &vec, int src, int tag) const {
165  return Recv_core(vec, src, tag, get_comm());
166 }
167 MPI_Status comm::Recv(monolish::vector<float> &vec, int src, int tag) const {
168  return Recv_core(vec, src, tag, get_comm());
169 }
170 
171 void comm::Isend(double val, int dst, int tag) {
172  Isend_core(val, dst, tag, get_comm(), requests);
173 }
174 void comm::Isend(float val, int dst, int tag) {
175  Isend_core(val, dst, tag, get_comm(), requests);
176 }
177 void comm::Isend(int val, int dst, int tag) {
178  Isend_core(val, dst, tag, get_comm(), requests);
179 }
180 void comm::Isend(size_t val, int dst, int tag) {
181  Isend_core(val, dst, tag, get_comm(), requests);
182 }
183 
184 void comm::Isend(const std::vector<double> &vec, int dst, int tag) {
185  Isend_core(vec, dst, tag, get_comm(), requests);
186 }
187 void comm::Isend(const std::vector<float> &vec, int dst, int tag) {
188  Isend_core(vec, dst, tag, get_comm(), requests);
189 }
190 void comm::Isend(const std::vector<int> &vec, int dst, int tag) {
191  Isend_core(vec, dst, tag, get_comm(), requests);
192 }
193 void comm::Isend(const std::vector<size_t> &vec, int dst, int tag) {
194  Isend_core(vec, dst, tag, get_comm(), requests);
195 }
196 
197 void comm::Isend(const monolish::vector<double> &vec, int dst, int tag) {
198  Isend_core(vec, dst, tag, get_comm(), requests);
199 }
200 void comm::Isend(const monolish::vector<float> &vec, int dst, int tag) {
201  Isend_core(vec, dst, tag, get_comm(), requests);
202 }
203 
204 void comm::Irecv(double val, int src, int tag) {
205  Irecv_core(val, src, tag, get_comm(), requests);
206 }
207 void comm::Irecv(float val, int src, int tag) {
208  Irecv_core(val, src, tag, get_comm(), requests);
209 }
210 void comm::Irecv(int val, int src, int tag) {
211  Irecv_core(val, src, tag, get_comm(), requests);
212 }
213 void comm::Irecv(size_t val, int src, int tag) {
214  Irecv_core(val, src, tag, get_comm(), requests);
215 }
216 
217 void comm::Irecv(std::vector<double> &vec, int src, int tag) {
218  Irecv_core(vec, src, tag, get_comm(), requests);
219 }
220 void comm::Irecv(std::vector<float> &vec, int src, int tag) {
221  Irecv_core(vec, src, tag, get_comm(), requests);
222 }
223 void comm::Irecv(std::vector<int> &vec, int src, int tag) {
224  Irecv_core(vec, src, tag, get_comm(), requests);
225 }
226 void comm::Irecv(std::vector<size_t> &vec, int src, int tag) {
227  Irecv_core(vec, src, tag, get_comm(), requests);
228 }
229 
230 void comm::Irecv(monolish::vector<double> &vec, int src, int tag) {
231  Irecv_core(vec, src, tag, get_comm(), requests);
232 }
233 void comm::Irecv(monolish::vector<float> &vec, int src, int tag) {
234  Irecv_core(vec, src, tag, get_comm(), requests);
235 }
236 
237 void comm::Waitall() { Waitall_core(requests); }
238 
239 double comm::Allreduce(double val) const {
240  return Allreduce_core(val, MPI_SUM, get_comm());
241 }
242 float comm::Allreduce(float val) const {
243  return Allreduce_core(val, MPI_SUM, get_comm());
244 }
245 int comm::Allreduce(int val) const {
246  return Allreduce_core(val, MPI_SUM, get_comm());
247 }
248 size_t comm::Allreduce(size_t val) const {
249  return Allreduce_core(val, MPI_SUM, get_comm());
250 }
251 
252 double comm::Allreduce_sum(double val) const {
253  return Allreduce_core(val, MPI_SUM, get_comm());
254 }
255 float comm::Allreduce_sum(float val) const {
256  return Allreduce_core(val, MPI_SUM, get_comm());
257 }
258 int comm::Allreduce_sum(int val) const {
259  return Allreduce_core(val, MPI_SUM, get_comm());
260 }
261 size_t comm::Allreduce_sum(size_t val) const {
262  return Allreduce_core(val, MPI_SUM, get_comm());
263 }
264 
265 double comm::Allreduce_prod(double val) const {
266  return Allreduce_core(val, MPI_PROD, get_comm());
267 }
268 float comm::Allreduce_prod(float val) const {
269  return Allreduce_core(val, MPI_PROD, get_comm());
270 }
271 int comm::Allreduce_prod(int val) const {
272  return Allreduce_core(val, MPI_PROD, get_comm());
273 }
274 size_t comm::Allreduce_prod(size_t val) const {
275  return Allreduce_core(val, MPI_PROD, get_comm());
276 }
277 
278 double comm::Allreduce_max(double val) const {
279  return Allreduce_core(val, MPI_MAX, get_comm());
280 }
281 float comm::Allreduce_max(float val) const {
282  return Allreduce_core(val, MPI_MAX, get_comm());
283 }
284 int comm::Allreduce_max(int val) const {
285  return Allreduce_core(val, MPI_MAX, get_comm());
286 }
287 size_t comm::Allreduce_max(size_t val) const {
288  return Allreduce_core(val, MPI_MAX, get_comm());
289 }
290 
291 double comm::Allreduce_min(double val) const {
292  return Allreduce_core(val, MPI_MIN, get_comm());
293 }
294 float comm::Allreduce_min(float val) const {
295  return Allreduce_core(val, MPI_MIN, get_comm());
296 }
297 int comm::Allreduce_min(int val) const {
298  return Allreduce_core(val, MPI_MIN, get_comm());
299 }
300 size_t comm::Allreduce_min(size_t val) const {
301  return Allreduce_core(val, MPI_MIN, get_comm());
302 }
303 
304 void comm::Bcast(double &val, int root) const {
305  Bcast_core(val, root, get_comm());
306 }
307 void comm::Bcast(float &val, int root) const {
308  Bcast_core(val, root, get_comm());
309 }
310 void comm::Bcast(int &val, int root) const {
311  Bcast_core(val, root, get_comm());
312 }
313 void comm::Bcast(size_t &val, int root) const {
314  Bcast_core(val, root, get_comm());
315 }
316 
317 void comm::Bcast(std::vector<double> &vec, int root) const {
318  Bcast_core(vec, root, get_comm());
319 }
320 void comm::Bcast(std::vector<float> &vec, int root) const {
321  Bcast_core(vec, root, get_comm());
322 }
323 void comm::Bcast(std::vector<int> &vec, int root) const {
324  Bcast_core(vec, root, get_comm());
325 }
326 void comm::Bcast(std::vector<size_t> &vec, int root) const {
327  Bcast_core(vec, root, get_comm());
328 }
329 
330 void comm::Bcast(monolish::vector<double> &vec, int root) const {
331  Bcast_core(vec, root, get_comm());
332 }
333 void comm::Bcast(monolish::vector<float> &vec, int root) const {
334  Bcast_core(vec, root, get_comm());
335 }
336 
337 void comm::Gather(monolish::vector<double> &sendvec,
338  monolish::vector<double> &recvvec, int root) const {
339  Gather_core(sendvec, recvvec, root, get_comm());
340 }
341 void comm::Gather(monolish::vector<float> &sendvec,
342  monolish::vector<float> &recvvec, int root) const {
343  Gather_core(sendvec, recvvec, root, get_comm());
344 }
345 
346 void comm::Gather(std::vector<double> &sendvec, std::vector<double> &recvvec,
347  int root) const {
348  Gather_core(sendvec, recvvec, root, get_comm());
349 }
350 void comm::Gather(std::vector<float> &sendvec, std::vector<float> &recvvec,
351  int root) const {
352  Gather_core(sendvec, recvvec, root, get_comm());
353 }
354 void comm::Gather(std::vector<int> &sendvec, std::vector<int> &recvvec,
355  int root) const {
356  Gather_core(sendvec, recvvec, root, get_comm());
357 }
358 void comm::Gather(std::vector<size_t> &sendvec, std::vector<size_t> &recvvec,
359  int root) const {
360  Gather_core(sendvec, recvvec, root, get_comm());
361 }
362 
363 void comm::Scatter(monolish::vector<double> &sendvec,
364  monolish::vector<double> &recvvec, int root) const {
365  Scatter_core(sendvec, recvvec, root, get_comm());
366 }
367 void comm::Scatter(monolish::vector<float> &sendvec,
368  monolish::vector<float> &recvvec, int root) const {
369  Scatter_core(sendvec, recvvec, root, get_comm());
370 }
371 
372 void comm::Scatter(std::vector<double> &sendvec, std::vector<double> &recvvec,
373  int root) const {
374  Scatter_core(sendvec, recvvec, root, get_comm());
375 }
376 void comm::Scatter(std::vector<float> &sendvec, std::vector<float> &recvvec,
377  int root) const {
378  Scatter_core(sendvec, recvvec, root, get_comm());
379 }
380 void comm::Scatter(std::vector<int> &sendvec, std::vector<int> &recvvec,
381  int root) const {
382  Scatter_core(sendvec, recvvec, root, get_comm());
383 }
384 void comm::Scatter(std::vector<size_t> &sendvec, std::vector<size_t> &recvvec,
385  int root) const {
386  Scatter_core(sendvec, recvvec, root, get_comm());
387 }
388 
389 } // namespace monolish::mpi
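
The listing above defines the blocking (Send/Recv), nonblocking (Isend/Irecv/Waitall), and collective (Allreduce, Bcast, Gather, Scatter, Barrier) wrappers of monolish::mpi::comm. The following is a minimal usage sketch, not part of monolish_mpi.cpp. It assumes the public header is <monolish_mpi.hpp> and that comm is obtained through a get_instance() singleton accessor declared in monolish_mpi_core.hpp; adjust both if your monolish build exposes them differently. All member functions called below are the ones defined in this file.

// Minimal usage sketch (assumptions: <monolish_mpi.hpp> header path,
// comm::get_instance() singleton accessor).
#include <monolish_mpi.hpp>

#include <iostream>
#include <vector>

int main(int argc, char **argv) {
  monolish::mpi::comm &comm = monolish::mpi::comm::get_instance();
  comm.Init(argc, argv); // wraps MPI_Init; does nothing without MONOLISH_USE_MPI

  const int rank = comm.get_rank(); // 0 when MPI is disabled
  const int size = comm.get_size(); // 1 when MPI is disabled

  // Nonblocking ring exchange: Isend/Irecv queue MPI_Request objects inside
  // comm, and Waitall() completes all of them.
  std::vector<double> sendbuf(4, static_cast<double>(rank));
  std::vector<double> recvbuf(4, 0.0);
  const int next = (rank + 1) % size;
  const int prev = (rank + size - 1) % size;
  comm.Isend(sendbuf, next, 0);
  comm.Irecv(recvbuf, prev, 0);
  comm.Waitall();

  // Collective reduction: every rank receives the global sum (MPI_SUM).
  const double total = comm.Allreduce(static_cast<double>(rank + 1));

  comm.Barrier();
  if (rank == 0) {
    std::cout << "processes: " << size << ", sum: " << total << std::endl;
  }

  comm.Finalize(); // wraps MPI_Finalize
  return 0;
}

When monolish is built without MONOLISH_USE_MPI, the same program runs serially: get_rank() returns 0, get_size() returns 1, and the communication calls fall through to the logger-only paths shown in the listing.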