NimbleSM
NimbleSM is a solid mechanics simulation code for dynamic systems
nimble.mpi.mpicontext.h
/*
//@HEADER
// ************************************************************************
//
// NimbleSM
// Copyright 2018
// National Technology & Engineering Solutions of Sandia, LLC (NTESS)
//
// Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government
// retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL NTESS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact David Littlewood (djlittl@sandia.gov)
//
// ************************************************************************
//@HEADER
*/

#pragma once
#include <mpi.h>

#include <algorithm>
#include <array>
#include <cstring>
#include <exception>
#include <iostream>
#include <memory>
#include <numeric>
#include <string>
#include <tuple>
#include <type_traits>
#include <vector>

#include "nimble.mpi.serialization.h"

namespace nimble {
class mpicontext
{
  static std::vector<int> serialization_buffer;
  static int
  get_rank(MPI_Comm comm)
  {
    int rank;
    MPI_Comm_rank(comm, &rank);
    return rank;
  }
  static int
  get_size(MPI_Comm comm)
  {
    int size;
    MPI_Comm_size(comm, &size);
    return size;
  }
  int      rank = 0;
  int      size = 0;
  MPI_Comm comm;

 public:
  const MPI_Comm&
  get_comm() const
  {
    return comm;
  }
  int
  get_size() const
  {
    return size;
  }
  int
  get_rank() const
  {
    return rank;
  }
  mpicontext() = default;
  mpicontext(MPI_Comm comm) : rank{get_rank(comm)}, size{get_size(comm)}, comm{comm} {}
  mpicontext(const mpicontext& context) = default;
  mpicontext(mpicontext&& context)      = default;
  mpicontext&
  operator=(const mpicontext& other) = default;
  mpicontext&
  operator=(mpicontext&& other) = default;
  int
  get_root() const
  {
    return 0;
  }
  bool
  is_root() const
  {
    return rank == get_root();
  }
  void
  send(const std::vector<int>& data, int rank, int tag) const
  {
    MPI_Send(data.data(), data.size(), MPI_INT, rank, tag, this->comm);
  }
  void
  send(std::pair<int*, int> data, int rank, int tag) const
  {
    MPI_Send(data.first, data.second, MPI_INT, rank, tag, this->comm);
  }
  int
  recv_count(int rank, int tag) const
  {
    int        count = 0;
    MPI_Status recv_status;
    MPI_Probe(rank, tag, this->comm, &recv_status);
    MPI_Get_count(&recv_status, MPI_INT, &count);
    return count;
  }
  int
  recv(std::vector<int>& data, int rank, int tag) const
  {
    int count = recv_count(rank, tag);
    data.resize(count);
    MPI_Recv(data.data(), count, MPI_INT, rank, tag, this->comm, MPI_STATUS_IGNORE);
    return count;
  }
  int
  recv(int* data, int rank, int tag, int max_buffer_size) const
  {
    MPI_Status recv_status;
    MPI_Recv(data, max_buffer_size, MPI_INT, rank, tag, this->comm, &recv_status);
    int count;
    MPI_Get_count(&recv_status, MPI_INT, &count);
    return count;
  }
  // Resizes the vector only when it needs more space; it never shrinks it.
  int
  recv_avoid_resize(std::vector<int>& data, int rank, int tag) const
  {
    int count = recv_count(rank, tag);
    if (count > (int)data.size()) data.resize(count);
    MPI_Recv(data.data(), count, MPI_INT, rank, tag, this->comm, MPI_STATUS_IGNORE);
    return count;
  }
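  // Usage sketch (illustrative, not part of the original header): a minimal
  // point-to-point exchange between ranks 0 and 1, assuming the context wraps
  // MPI_COMM_WORLD and the job runs on at least two ranks:
  //
  //   nimble::mpicontext ctx(MPI_COMM_WORLD);
  //   std::vector<int> payload{1, 2, 3};
  //   if (ctx.get_rank() == 0) {
  //     ctx.send(payload, /*rank*/ 1, /*tag*/ 42);
  //   } else if (ctx.get_rank() == 1) {
  //     std::vector<int> incoming;
  //     ctx.recv(incoming, /*rank*/ 0, /*tag*/ 42);  // resizes to the probed count
  //   }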
  template <class T, class podT>
  void
  sendpacked(const T& source, int rank, int tag, std::vector<podT>& buffer) const
  {
    serialization::pack_avoid_resize(source, buffer);
    send(buffer, rank, tag);
  }
  template <class T, class podT>
  void
  recvpacked(T& dest, int rank, int tag, std::vector<podT>& buffer) const
  {
    recv_avoid_resize(buffer, rank, tag);
    serialization::unpack(dest, buffer);
  }

  // Attempts to serialize the input and send it.
  template <class T>
  void
  sendpacked(const T& source, int rank, int tag) const
  {
    int sendcount = serialization::pack_avoid_resize(source, serialization_buffer);
    send({serialization_buffer.data(), sendcount}, rank, tag);
  }
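  // Usage sketch (illustrative): round-tripping an object through the
  // serialization layer with a caller-provided scratch buffer. Assumes the
  // payload type is one the serialization header supports:
  //
  //   std::vector<int> scratch;
  //   if (ctx.get_rank() == 0) {
  //     std::vector<int> object{4, 5, 6};
  //     ctx.sendpacked(object, /*rank*/ 1, /*tag*/ 7, scratch);
  //   } else if (ctx.get_rank() == 1) {
  //     std::vector<int> object;
  //     ctx.recvpacked(object, /*rank*/ 0, /*tag*/ 7, scratch);
  //   }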
  std::vector<std::vector<int>>
  gather(const std::vector<int>& source, int root) const
  {
    if (this->get_rank() == root) {
      // Step 1: figure out how much data is going to be incoming from each rank.
      std::vector<int> recv_counts(this->get_size());
      int              sendcount = source.size();
      // TODO: review the documentation at
      // https://www.mpich.org/static/docs/v3.1/www3/MPI_Gather.html
      // https://www.mpich.org/static/docs/v3.1/www3/MPI_Gatherv.html
      // to ensure that the function call is being passed the correct arguments.
      MPI_Gather(&sendcount, 1, MPI_INT, recv_counts.data(), 1, MPI_INT, root, this->comm);

      // Step 2: get the data from all ranks as one contiguous block.
      // destination.size() is the sum of the lengths of all incoming arrays.
      std::vector<int> destination(std::accumulate(recv_counts.begin(), recv_counts.end(), 0));
      std::vector<int> displacements(this->get_size(), 0);
      std::partial_sum(recv_counts.begin(), recv_counts.end() - 1, displacements.data() + 1);
      MPI_Gatherv(
          source.data(),
          sendcount,
          MPI_INT,
          destination.data(),
          recv_counts.data(),
          displacements.data(),
          MPI_INT,
          root,
          this->get_comm());

      // Step 3: copy the data from the contiguous block into a vector of vectors,
      // advancing by each rank's own count so differently sized segments stay aligned.
      std::vector<std::vector<int>> result(this->get_size());
      const int*                    segment_begin = destination.data();
      for (size_t i = 0; i < result.size(); ++i) {
        result[i] = std::vector<int>(segment_begin, segment_begin + recv_counts[i]);
        segment_begin += recv_counts[i];
      }
      return result;
    } else {
      // Step 1 (see above)
      int sendcount = source.size();
      MPI_Gather(&sendcount, 1, MPI_INT, nullptr, 0, MPI_INT, root, this->get_comm());

      // Step 2 (see above)
      MPI_Gatherv(source.data(), sendcount, MPI_INT, nullptr, nullptr, nullptr, MPI_INT, root, this->get_comm());

      // Return an empty vector; only the root rank receives incoming data.
      return {};
    }
  }
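  // Usage sketch (illustrative): every rank contributes a vector of a
  // different length; the root receives one inner vector per rank, while
  // every other rank receives an empty outer vector:
  //
  //   std::vector<int> mine(ctx.get_rank() + 1, ctx.get_rank());
  //   std::vector<std::vector<int>> all = ctx.gather(mine, ctx.get_root());
  //   if (ctx.is_root()) {
  //     // all[r] holds rank r's data
  //   }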
  std::vector<int>
  scatter(int root, const std::vector<std::vector<int>>& source) const
  {
    if (this->get_rank() == root) {
      // Step 1: notify each rank of how much data it should expect to receive.
      int              recvcount;
      std::vector<int> send_counts(this->get_size());
      for (size_t i = 0; i < send_counts.size(); ++i) { send_counts[i] = source[i].size(); }
      MPI_Scatter(send_counts.data(), 1, MPI_INT, &recvcount, 1, MPI_INT, root, this->get_comm());

      // Step 2: flatten the source into one contiguous buffer, then send each
      // rank its slice.
      std::vector<int> destination(recvcount, 0);
      std::vector<int> displacements(this->get_size(), 0);
      std::vector<int> source_made_contiguous(std::accumulate(send_counts.begin(), send_counts.end(), 0), 0);

      std::partial_sum(send_counts.begin(), send_counts.end() - 1, displacements.data() + 1);
      for (size_t i = 0; i < source.size(); ++i) {
        std::copy(source[i].begin(), source[i].end(), source_made_contiguous.begin() + displacements[i]);
      }
      MPI_Scatterv(
          source_made_contiguous.data(),
          send_counts.data(),
          displacements.data(),
          MPI_INT,
          destination.data(),
          recvcount,
          MPI_INT,
          root,
          this->get_comm());
      return destination;
    } else {
      int recvcount;
      MPI_Scatter(nullptr, 0, MPI_INT, &recvcount, 1, MPI_INT, root, this->get_comm());
      std::vector<int> recvbuff(recvcount);
      MPI_Scatterv(nullptr, nullptr, nullptr, MPI_INT, recvbuff.data(), recvcount, MPI_INT, root, this->get_comm());
      return recvbuff;
    }
  }
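  // Usage sketch (illustrative): the inverse of gather above; the root
  // provides one vector per rank and each rank (root included) receives its
  // own slice:
  //
  //   std::vector<std::vector<int>> parts;
  //   if (ctx.is_root()) {
  //     parts.resize(ctx.get_size());
  //     for (int r = 0; r < ctx.get_size(); ++r) parts[r].assign(r + 1, r);
  //   }
  //   std::vector<int> my_part = ctx.scatter(ctx.get_root(), parts);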

  template <size_t count>
  std::vector<std::array<int, count>>
  allgather_to_vector(const std::array<int, count>& values) const
  {
    std::vector<std::array<int, count>> buffer(get_size());
    MPI_Allgather(
        values.data(),      /* data being sent */
        (int)values.size(), /* amount of data being sent */
        MPI_INT,            /* send type */
        buffer.data(),      /* buffer for received data */
        (int)values.size(), /* stride with which to place received data */
        MPI_INT,            /* receive type */
        get_comm()          /* MPI communicator to use */
    );
    return buffer;
  }
  std::vector<int>
  allgather_to_vector(int value) const
  {
    std::vector<int> buffer(get_size());
    MPI_Allgather(
        &value,        /* data being sent */
        1,             /* amount of data being sent */
        MPI_INT,       /* send type */
        buffer.data(), /* buffer for received data */
        1,             /* stride with which to place received data */
        MPI_INT,       /* receive type */
        get_comm()     /* MPI communicator to use */
    );
    return buffer;
  }
  std::vector<size_t>
  allgather_to_vector(size_t value) const
  {
    std::vector<size_t> buffer(get_size());
    if (sizeof(unsigned long) == sizeof(size_t)) {
      MPI_Allgather(
          &value,            /* data being sent */
          1,                 /* amount of data being sent */
          MPI_UNSIGNED_LONG, /* send type */
          buffer.data(),     /* buffer for received data */
          1,                 /* stride with which to place received data */
          MPI_UNSIGNED_LONG, /* receive type */
          get_comm()         /* MPI communicator to use */
      );
    } else {
      MPI_Allgather(
          &value,            /* data being sent */
          sizeof(size_t),    /* amount of data being sent */
          MPI_UNSIGNED_CHAR, /* send type */
          buffer.data(),     /* buffer for received data */
          sizeof(size_t),    /* stride with which to place received data */
          MPI_UNSIGNED_CHAR, /* receive type */
          get_comm()         /* MPI communicator to use */
      );
    }
    return buffer;
  }
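  // Usage sketch (illustrative): every rank learns every rank's value, e.g.
  // to build a global table of per-rank counts (my_element_count is a
  // stand-in for any per-rank int):
  //
  //   std::vector<int> counts = ctx.allgather_to_vector(my_element_count);
  //   // counts[r] is rank r's value, identical on every rank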
  void
  gather_send(int value) const
  {
    MPI_Gather(
        &value,     /* data being sent */
        1,          /* amount of data being sent */
        MPI_INT,    /* send type */
        nullptr,    /* receive buffer (unused on non-root ranks) */
        1,          /* stride with which to place received data */
        MPI_INT,    /* receive type */
        get_root(), /* rank that acts as root */
        get_comm()  /* MPI communicator to use */
    );
  }
  std::vector<int>
  gather_recieve(int value) const
  {
    std::vector<int> buffer(get_size());
    MPI_Gather(
        &value,        /* data being sent */
        1,             /* amount of data being sent */
        MPI_INT,       /* send type */
        buffer.data(), /* buffer for received data */
        1,             /* stride with which to place received data */
        MPI_INT,       /* receive type */
        get_root(),    /* rank that acts as root */
        get_comm()     /* MPI communicator to use */
    );
    return buffer;
  }
  template <size_t n>
  void
  gather_send(const std::array<int, n>& arr) const
  {
    MPI_Gather(
        arr.data(), /* data being sent */
        n,          /* amount of data being sent */
        MPI_INT,    /* send type */
        nullptr,    /* receive buffer (unused on non-root ranks) */
        n,          /* stride with which to place received data */
        MPI_INT,    /* receive type */
        get_root(), /* rank that acts as root */
        get_comm()  /* MPI communicator to use */
    );
  }
  template <size_t n>
  std::vector<std::array<int, n>>
  gather_recieve(const std::array<int, n>& arr) const
  {
    std::vector<std::array<int, n>> buffer(get_size());
    MPI_Gather(
        arr.data(),    /* data being sent */
        n,             /* amount of data being sent */
        MPI_INT,       /* send type */
        buffer.data(), /* buffer for received data */
        n,             /* stride with which to place received data */
        MPI_INT,       /* receive type */
        get_root(),    /* rank that acts as root */
        get_comm()     /* MPI communicator to use */
    );
    return buffer;
  }
  void
  gather_send(int* data, int size) const
  {
    MPI_Gather(
        data,       /* data being sent */
        size,       /* amount of data being sent */
        MPI_INT,    /* send type */
        nullptr,    /* receive buffer (unused on non-root ranks) */
        size,       /* stride with which to place received data */
        MPI_INT,    /* receive type */
        get_root(), /* rank that acts as root */
        get_comm()  /* MPI communicator to use */
    );
  }
  std::vector<int>
  gather_recieve(int* data, int size) const
  {
    std::vector<int> buffer(get_size() * size);
    MPI_Gather(
        data,          /* data being sent */
        size,          /* amount of data being sent */
        MPI_INT,       /* send type */
        buffer.data(), /* buffer for received data */
        size,          /* stride with which to place received data */
        MPI_INT,       /* receive type */
        get_root(),    /* rank that acts as root */
        get_comm());   /* MPI communicator to use */
    return buffer;
  }
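  // Usage sketch (illustrative): the send/recieve halves form one collective,
  // so the root calls gather_recieve (contributing its own value) while every
  // other rank calls gather_send:
  //
  //   if (ctx.is_root()) {
  //     std::vector<int> all = ctx.gather_recieve(ctx.get_rank());
  //     // all == {0, 1, ..., size-1}
  //   } else {
  //     ctx.gather_send(ctx.get_rank());
  //   }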

  void
  bcast(int& value) const
  {
    MPI_Bcast(&value, 1, MPI_INT, this->get_root(), this->get_comm());
  }
  template <size_t count>
  void
  bcast(std::array<int, count>& arr) const
  {
    MPI_Bcast(arr.data(), count, MPI_INT, this->get_root(), this->get_comm());
  }
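  // Usage sketch (illustrative): every rank calls bcast; afterwards the
  // root's value has overwritten everyone else's:
  //
  //   int step_count = ctx.is_root() ? 100 : 0;
  //   ctx.bcast(step_count);  // step_count == 100 on every rank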

  std::string
  catenate_gatherv(const std::string& str) const
  {
    if (is_root()) {
      std::vector<int> counts{gather_recieve(str.size())};
      std::vector<int> displacements(counts.size());
      std::partial_sum(counts.begin(), counts.end() - 1, displacements.begin() + 1);
      std::string dest((size_t)(displacements.back() + counts.back()), ' ');
      MPI_Gatherv(
          str.data(),           /* data being sent */
          str.size(),           /* amount of data being sent */
          MPI_CHAR,             /* type of data being sent */
          &dest[0],             /* destination of the gathered data */
          counts.data(),        /* amount of data expected from each rank */
          displacements.data(), /* displacements of the received data */
          MPI_CHAR,             /* type of data being received */
          this->get_root(),     /* root */
          this->get_comm()      /* MPI communicator to use */
      );
      return dest;
    } else {
      gather_send(str.size());
      MPI_Gatherv(
          str.data(), /* data being sent */
          str.size(), /* amount of data being sent */
          MPI_CHAR,   /* type of data being sent */
          nullptr,    /* destination of the gathered data */
          nullptr,    /* amount of data expected from each rank */
          nullptr,    /* displacements of the received data */
          MPI_CHAR,   /* type of data being received */
          get_root(), /* root */
          get_comm()  /* MPI communicator to use */
      );
      return {};
    }
  }
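  // Usage sketch (illustrative): each rank contributes a string; the root
  // receives the rank-ordered concatenation and every other rank gets "":
  //
  //   std::string joined =
  //       ctx.catenate_gatherv("rank " + std::to_string(ctx.get_rank()) + ";");
  //   // on the root: "rank 0;rank 1;rank 2;..."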
  std::string
  catenate_gatherv_format(
      const std::string& str,
      const std::string& _start,
      const std::string& separator,
      const std::string& _end) const
  {
    if (is_root()) {
      std::vector<int> counts{gather_recieve(str.size())};
      std::vector<int> displacements;
      displacements.reserve(counts.size());
      int displacement_accumulator = _start.size();
      int separator_size           = separator.size();
      for (int count : counts) {
        displacements.emplace_back(displacement_accumulator);
        displacement_accumulator += separator_size + count;
      }
      std::string dest((size_t)(displacement_accumulator + _end.size() - separator_size), ' ');
      MPI_Gatherv(
          str.data(),           /* data being sent */
          str.size(),           /* amount of data being sent */
          MPI_CHAR,             /* type of data being sent */
          &dest[0],             /* destination of the gathered data */
          counts.data(),        /* amount of data expected from each rank */
          displacements.data(), /* displacements of the received data */
          MPI_CHAR,             /* type of data being received */
          this->get_root(),     /* root */
          this->get_comm()      /* MPI communicator to use */
      );
      std::copy(_start.begin(), _start.end(), &dest[0]);
      for (int i = 0, max = counts.size() - 1; i < max; ++i) {
        int offset = displacements[i] + counts[i];
        std::copy(separator.begin(), separator.end(), &dest[offset]);
      }
      std::copy(_end.begin(), _end.end(), &dest[displacements.back() + counts.back()]);
      return dest;
    } else {
      gather_send(str.size());
      MPI_Gatherv(
          str.data(), /* data being sent */
          str.size(), /* amount of data being sent */
          MPI_CHAR,   /* type of data being sent */
          nullptr,    /* destination of the gathered data */
          nullptr,    /* amount of data expected from each rank */
          nullptr,    /* displacements of the received data */
          MPI_CHAR,   /* type of data being received */
          get_root(), /* root */
          get_comm()  /* MPI communicator to use */
      );
      return {};
    }
  }
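  // Usage sketch (illustrative): like catenate_gatherv, but wraps the result
  // with a prefix and suffix and joins the per-rank pieces with a separator:
  //
  //   std::string joined = ctx.catenate_gatherv_format(
  //       std::to_string(ctx.get_rank()), "{", ", ", "}");
  //   // on the root of a 3-rank job: "{0, 1, 2}"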
  const mpicontext&
  print(const std::string& s, std::ostream& os = std::cout) const
  {
    std::string str = catenate_gatherv(s);
    if (is_root()) { os << str; }
    return *this;
  }
  const mpicontext&
  print_formatted(
      const std::string& s,
      const std::string& _start,
      const std::string& separator,
      const std::string& _end,
      std::ostream&      os = std::cout) const
  {
    std::string str = catenate_gatherv_format(s, _start, separator, _end);
    if (is_root()) { os << str; }
    return *this;
  }
  const mpicontext&
  println(const std::string& s, std::ostream& os = std::cout) const
  {
    std::string str = catenate_gatherv(s + "\n");
    if (is_root()) { os << str; }
    return *this;
  }
  template <class T>
  const mpicontext&
  print_if_root(const T& s, std::ostream& os = std::cout) const
  {
    if (is_root()) { os << s; }
    return *this;
  }
  template <class T>
  const mpicontext&
  println_if_root(const T& s, std::ostream& os = std::cout) const
  {
    if (is_root()) { os << s << std::endl; }
    return *this;
  }
  void
  gatherv_send(const std::vector<int>& ints) const
  {
    // See: https://www.open-mpi.org/doc/v2.1/man3/MPI_Gatherv.3.php
    MPI_Gatherv(
        ints.data(), /* data being sent */
        ints.size(), /* amount of data being sent */
        MPI_INT,     /* type of data being sent */
        nullptr,     /* destination of the gathered data */
        nullptr,     /* amount of data expected from each rank */
        nullptr,     /* displacements of the received data */
        MPI_INT,     /* type of data being received */
        get_root(),  /* root */
        get_comm()); /* MPI communicator to use */
  }
  void
  gatherv_recieve(
      const std::vector<int>& ints,
      std::vector<int>&       dest,
      const std::vector<int>& counts,
      const std::vector<int>& displacements) const
  {
    // See: https://www.open-mpi.org/doc/v2.1/man3/MPI_Gatherv.3.php
    MPI_Gatherv(
        ints.data(),          /* data being sent */
        ints.size(),          /* amount of data being sent */
        MPI_INT,              /* type of data being sent */
        dest.data(),          /* destination of the gathered data */
        counts.data(),        /* amount of data expected from each rank */
        displacements.data(), /* displacements of the received data */
        MPI_INT,              /* type of data being received */
        this->get_root(),     /* root */
        this->get_comm());    /* MPI communicator to use */
  }
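  // Usage sketch (illustrative): the root must size dest and compute counts
  // and displacements before the collective; here the counts are exchanged
  // first with allgather_to_vector:
  //
  //   std::vector<int> mine(ctx.get_rank() + 1, 7);
  //   std::vector<int> counts = ctx.allgather_to_vector((int)mine.size());
  //   if (ctx.is_root()) {
  //     std::vector<int> displacements(counts.size(), 0);
  //     std::partial_sum(counts.begin(), counts.end() - 1, displacements.begin() + 1);
  //     std::vector<int> dest(std::accumulate(counts.begin(), counts.end(), 0));
  //     ctx.gatherv_recieve(mine, dest, counts, displacements);
  //   } else {
  //     ctx.gatherv_send(mine);
  //   }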

  void
  scatterv_send(
      const std::vector<int>& ints,
      const std::vector<int>& counts,
      const std::vector<int>& displacements,
      std::vector<int>&       dest) const
  {
    // See: https://www.open-mpi.org/doc/v2.1/man3/MPI_Scatterv.3.php
    MPI_Scatterv(
        ints.data(),          /* data being sent */
        counts.data(),        /* amount of data being sent to each rank */
        displacements.data(), /* displacements of the data being sent */
        MPI_INT,              /* type of data being sent */
        dest.data(),          /* buffer for this rank's portion */
        dest.size(),          /* amount of data to receive */
        MPI_INT,              /* type of data being received */
        this->get_root(),     /* root sending the data */
        this->get_comm());    /* communicator */
  }
  void
  scatterv_recieve(std::vector<int>& dest) const
  {
    // See: https://www.open-mpi.org/doc/v2.1/man3/MPI_Scatterv.3.php
    MPI_Scatterv(
        nullptr,           /* data being sent */
        nullptr,           /* amount of data being sent to each rank */
        nullptr,           /* displacements of the data being sent */
        MPI_INT,           /* type of data being sent */
        dest.data(),       /* buffer for this rank's portion */
        dest.size(),       /* amount of data to receive */
        MPI_INT,           /* type of data being received */
        this->get_root(),  /* root sending the data */
        this->get_comm()); /* communicator */
  }
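  // Usage sketch (illustrative): the root supplies the flattened data plus
  // per-rank counts and displacements; every receiver must already know how
  // many ints to expect (agreed beforehand, e.g. via a bcast). Shown for a
  // hypothetical 3-rank job:
  //
  //   if (ctx.is_root()) {
  //     std::vector<int> flat{0, 1, 1, 2, 2, 2};  // rank 0 gets 1 int, etc.
  //     std::vector<int> counts{1, 2, 3};
  //     std::vector<int> displacements{0, 1, 3};
  //     std::vector<int> mine(counts[0]);         // root's own portion
  //     ctx.scatterv_send(flat, counts, displacements, mine);
  //   } else {
  //     std::vector<int> mine(ctx.get_rank() + 1); // known expected size
  //     ctx.scatterv_recieve(mine);
  //   }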
  MPI_Comm
  split_by_color(int color) const
  {
    return split_by_color(color, this->get_rank());
  }
  MPI_Comm
  split_by_color(int color, int key) const
  {
    MPI_Comm new_comm;
    MPI_Comm_split(get_comm(), color, key, &new_comm);
    return new_comm;
  }
  MPI_Comm
  split_by_color() const
  {
    return split_by_color(MPI_UNDEFINED);
  }
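  // Usage sketch (illustrative): partition the ranks into "even" and "odd"
  // sub-communicators; the zero-argument overload passes MPI_UNDEFINED so the
  // calling rank joins no new communicator at all:
  //
  //   MPI_Comm half = ctx.split_by_color(ctx.get_rank() % 2);
  //   nimble::mpicontext sub(half);  // ranks are renumbered within each half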
};
}  // namespace nimble