NimbleSM
NimbleSM is a solid mechanics simulation code for dynamic systems
nimble::mpicontext Class Reference

#include <nimble.mpi.mpicontext.h>

Public Member Functions

const MPI_Comm & get_comm () const
 
int get_size () const
 
int get_rank () const
 
 mpicontext ()=default
 
 mpicontext (MPI_Comm comm)
 
 mpicontext (const mpicontext &context)=default
 
 mpicontext (mpicontext &&context)=default
 
mpicontext & operator= (const mpicontext &context)=default
 
mpicontext & operator= (mpicontext &&context)=default
 
int get_root () const
 
bool is_root () const
 
void send (const std::vector< int > &data, int rank, int tag) const
 
void send (std::pair< int *, int > data, int rank, int tag) const
 
int recv_count (int rank, int tag) const
 
int recv (std::vector< int > &data, int rank, int tag) const
 
int recv (int *data, int rank, int tag, int max_buffer_size) const
 
int recv_avoid_resize (std::vector< int > &data, int rank, int tag) const
 
template<class T, class podT>
void sendpacked (const T &source, int rank, int tag, std::vector< podT > &buffer) const
 
template<class T, class podT>
void recvpacked (T &dest, int rank, int tag, std::vector< podT > &buffer) const
 
template<class T>
void sendpacked (const T &source, int rank, int tag) const
 
std::vector< std::vector< int > > gather (const std::vector< int > &source, int root) const
 
std::vector< int > scatter (int root, const std::vector< std::vector< int > > &source) const
 
template<size_t count>
std::vector< std::array< int, count > > allgather_to_vector (const std::array< int, count > &values) const
 
std::vector< int > allgather_to_vector (int value) const
 
std::vector< size_t > allgather_to_vector (size_t value) const
 
void gather_send (int value) const
 
std::vector< int > gather_recieve (int value) const
 
template<size_t n>
void gather_send (const std::array< int, n > &arr) const
 
template<size_t n>
std::vector< std::array< int, n > > gather_recieve (const std::array< int, n > &arr) const
 
void gather_send (int *data, int size) const
 
std::vector< int > gather_recieve (int *data, int size) const
 
void bcast (int &value) const
 
template<size_t count>
void bcast (std::array< int, count > &arr) const
 
std::string catenate_gatherv (const std::string &str) const
 
std::string catenate_gatherv_format (const std::string &str, const std::string &_start, const std::string &separator, const std::string &_end) const
 
const mpicontext & print (const std::string &s, std::ostream &os=std::cout) const
 
const mpicontext & print_formatted (const std::string &s, const std::string &_start, const std::string &separator, const std::string &_end, std::ostream &os=std::cout) const
 
const mpicontext & println (const std::string &s, std::ostream &os=std::cout) const
 
template<class T>
const mpicontext & print_if_root (const T &s, std::ostream &os=std::cout) const
 
template<class T>
const mpicontext & println_if_root (const T &s, std::ostream &os=std::cout) const
 
void gatherv_send (const std::vector< int > &ints) const
 
void gatherv_recieve (const std::vector< int > &ints, std::vector< int > &dest, const std::vector< int > &counts, const std::vector< int > &displacements) const
 
void scatterv_send (const std::vector< int > &ints, const std::vector< int > &counts, const std::vector< int > &displacements, std::vector< int > &dest) const
 
void scatterv_recieve (std::vector< int > &dest) const
 
MPI_Comm split_by_color (int color) const
 
MPI_Comm split_by_color (int color, int key) const
 
MPI_Comm split_by_color () const
 

Constructor & Destructor Documentation

◆ mpicontext() [1/4]

nimble::mpicontext::mpicontext ( )
default

◆ mpicontext() [2/4]

nimble::mpicontext::mpicontext ( MPI_Comm comm)
inline
100 : rank{get_rank(comm)}, size{get_size(comm)}, comm{comm} {}
int get_rank() const
Definition nimble.mpi.mpicontext.h:95
int get_size() const
Definition nimble.mpi.mpicontext.h:90
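A minimal usage sketch (it assumes MPI_Init has already been called and MPI_Finalize is handled elsewhere):

nimble::mpicontext context(MPI_COMM_WORLD);
if (context.is_root()) {
  std::cout << "running on " << context.get_size() << " ranks" << std::endl;
}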

◆ mpicontext() [3/4]

nimble::mpicontext::mpicontext ( const mpicontext & context)
default

◆ mpicontext() [4/4]

nimble::mpicontext::mpicontext ( mpicontext && context)
default

Member Function Documentation

◆ allgather_to_vector() [1/3]

template<size_t count>
std::vector< std::array< int, count > > nimble::mpicontext::allgather_to_vector ( const std::array< int, count > & values) const
inline
276 {
277 std::vector<std::array<int, count>> buffer(get_size());
278 MPI_Allgather(
279 values.data(), /* data being sent */
280 (int)values.size(), /* amount of data being sent */
281 MPI_INT, /* Send type */
282 buffer.data(), /* buffer to place received data */
283 (int)values.size(), /* spacing with which to insert data from buffer */
284 MPI_INT, /* Receive type */
285 get_comm() /* MPI Comm to use */
286 );
287 return buffer;
288 }
const MPI_Comm & get_comm() const
Definition nimble.mpi.mpicontext.h:85

◆ allgather_to_vector() [2/3]

std::vector< int > nimble::mpicontext::allgather_to_vector ( int value) const
inline
291 {
292 std::vector<int> buffer(get_size());
293 MPI_Allgather(
294 &value, /* data being sent */
295 1, /* amount of data being sent */
296 MPI_INT, /* Send type */
297 buffer.data(), /* buffer to place received data */
298 1, /* spacing with which to insert data from buffer */
299 MPI_INT, /* Receive type */
300 get_comm() /* MPI Comm to use */
301 );
302 return buffer;
303 }

◆ allgather_to_vector() [3/3]

std::vector< size_t > nimble::mpicontext::allgather_to_vector ( size_t value) const
inline
306 {
307 std::vector<size_t> buffer(get_size());
308 if (sizeof(unsigned long) == sizeof(size_t)) {
309 MPI_Allgather(
310 &value, /* data being sent */
311 1, /* amount of data being sent */
312 MPI_UNSIGNED_LONG, /* Send type */
313 buffer.data(), /* buffer to place received data */
314 1, /* spacing with which to insert data from buffer */
315 MPI_UNSIGNED_LONG, /* Receive type */
316 get_comm() /* MPI Comm to use */
317 );
318 } else {
319 MPI_Allgather(
320 &value, /* data being sent */
321 sizeof(size_t), /* amount of data being sent */
322 MPI_UNSIGNED_CHAR, /* Send type */
323 buffer.data(), /* buffer to place received data */
324 sizeof(size_t), /* spacing with which to insert data from buffer */
325 MPI_UNSIGNED_CHAR, /* Receive type */
326 get_comm() /* MPI Comm to use */
327 );
328 }
329 return buffer;
330 }
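As a usage sketch (assuming a constructed context as above), every rank contributes one value and every rank receives the full list:

std::vector<int> all_ranks = context.allgather_to_vector(context.get_rank());
// on every rank, all_ranks[i] == i for i in [0, get_size())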

◆ bcast() [1/2]

void nimble::mpicontext::bcast ( int & value) const
inline
425 {
426 MPI_Bcast(&value, 1, MPI_INT, this->get_root(), this->get_comm());
427 }
int get_root() const
Definition nimble.mpi.mpicontext.h:108

◆ bcast() [2/2]

template<size_t count>
void nimble::mpicontext::bcast ( std::array< int, count > & arr) const
inline
431 {
432 MPI_Bcast(arr.data(), count, MPI_INT, this->get_root(), this->get_comm());
433 }
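A usage sketch: the root chooses a value and shares it with all ranks (the variable name is illustrative only):

int num_steps = 0;
if (context.is_root()) { num_steps = 100; }
context.bcast(num_steps);  // after the call, num_steps == 100 on every rank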

◆ catenate_gatherv()

std::string nimble::mpicontext::catenate_gatherv ( const std::string & str) const
inline
437 {
438 if (is_root()) {
439 std::vector<int> counts{gather_recieve(str.size())};
440 std::vector<int> displacements(counts.size());
441 std::partial_sum(counts.begin(), counts.end() - 1, displacements.begin() + 1);
442 std::string dest((size_t)(displacements.back() + counts.back()), ' ');
443 MPI_Gatherv(
444 str.data(), /* data being sent */
445 str.size(), /* amount of data being sent */
446 MPI_CHAR, /* type of data being sent */
447 &dest[0], /* destination of data being sent */
448 counts.data(), /* expected amount of data to be received from each
449 rank */
450 displacements.data(), /* displacements of received data */
451 MPI_CHAR, /* type of data being received */
452 this->get_root(), /* root */
453 this->get_comm() /* MPI Comm to use */
454 );
455 return dest;
456 } else {
457 gather_send(str.size());
458 MPI_Gatherv(
459 str.data(), /* data being sent */
460 str.size(), /* amount of data being sent */
461 MPI_CHAR, /* type of data being sent */
462 nullptr, /* destination of data being sent */
463 nullptr, /* expected amount of data to be received from each rank */
464 nullptr, /* displacements of received data */
465 MPI_CHAR, /* type of data being received */
466 get_root(), /* root */
467 get_comm() /* MPI Comm to use */
468 );
469 return {};
470 }
471 }
std::vector< int > gather_recieve(int value) const
Definition nimble.mpi.mpicontext.h:346
void gather_send(int value) const
Definition nimble.mpi.mpicontext.h:332
bool is_root() const
Definition nimble.mpi.mpicontext.h:113
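A usage sketch: each rank contributes one line, and only the root receives the concatenated result (non-root ranks get an empty string, as shown above):

std::string line = "rank " + std::to_string(context.get_rank()) + " ready\n";
std::string report = context.catenate_gatherv(line);
context.print_if_root(report);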

◆ catenate_gatherv_format()

std::string nimble::mpicontext::catenate_gatherv_format ( const std::string & str,
const std::string & _start,
const std::string & separator,
const std::string & _end ) const
inline
478 {
479 if (is_root()) {
480 std::vector<int> counts{gather_recieve(str.size())};
481 std::vector<int> displacements;
482 displacements.reserve(counts.size());
483 int displacement_accumulator = _start.size();
484 int separator_size = separator.size();
485 for (int count : counts) {
486 displacements.emplace_back(displacement_accumulator);
487 displacement_accumulator += separator_size + count;
488 }
489 std::string dest((size_t)(displacement_accumulator + _end.size() - separator_size), ' ');
490 MPI_Gatherv(
491 str.data(), /* data being sent */
492 str.size(), /* amount of data being sent */
493 MPI_CHAR, /* type of data being sent */
494 &dest[0], /* destination of data being sent */
495 counts.data(), /* expected amount of data to be received from each
496 rank */
497 displacements.data(), /* displacements of received data */
498 MPI_CHAR, /* type of data being received */
499 this->get_root(), /* root */
500 this->get_comm() /* MPI Comm to use */
501 );
502 std::copy(_start.begin(), _start.end(), &dest[0]);
503 for (int i = 0, max = counts.size() - 1; i < max; ++i) {
504 int offset = displacements[i] + counts[i];
505 std::copy(separator.begin(), separator.end(), &dest[offset]);
506 }
507 std::copy(_end.begin(), _end.end(), &dest[displacements.back() + counts.back()]);
508 return dest;
509 } else {
510 gather_send(str.size());
511 MPI_Gatherv(
512 str.data(), /* data being sent */
513 str.size(), /* amount of data being sent */
514 MPI_CHAR, /* type of data being sent */
515 nullptr, /* destination of data being sent */
516 nullptr, /* expected amount of data to be received from each rank */
517 nullptr, /* displacements of received data */
518 MPI_CHAR, /* type of data being received */
519 get_root(), /* root */
520 get_comm() /* MPI Comm to use */
521 );
522 return {};
523 }
524 }

◆ gather()

std::vector< std::vector< int > > nimble::mpicontext::gather ( const std::vector< int > & source,
int root ) const
inline
188 {
189 if (this->get_rank() == root) {
190 // Step 1: figure out how much data is going to be incoming from each rank
191 std::vector<int> recv_counts(this->get_size());
192 int sendcount = source.size();
193 // TO-DO: review documentation at
194 // https://www.mpich.org/static/docs/v3.1/www3/MPI_Gather.html
195 // https://www.mpich.org/static/docs/v3.1/www3/MPI_Gatherv.html
196 // to ensure that the function call is being passed the correct arguments.
197 MPI_Gather(&sendcount, 1, MPI_INT, recv_counts.data(), 1, MPI_INT, root, this->comm);
198
199 // Step 2: Get data from all ranks as contiguous block of data
200 // destination.size() is the sum of the lengths of all incoming arrays.
201 std::vector<int> destination(std::accumulate(recv_counts.begin(), recv_counts.end(), 0));
202 std::vector<int> displacements(this->get_size(), 0);
203 std::partial_sum(recv_counts.begin(), recv_counts.end() - 1, displacements.data() + 1);
204 MPI_Gatherv(
205 source.data(),
206 sendcount,
207 MPI_INT,
208 destination.data(),
209 recv_counts.data(),
210 displacements.data(),
211 MPI_INT,
212 root,
213 this->get_comm());
214
215 // Step 3: Copy data from a contiguous block into a vector of vectors.
216 std::vector<std::vector<int>> result(this->get_size());
217 int * start_iter = destination.data(), *end_iter = destination.data() + recv_counts[0];
218 for (size_t i = 0; i < result.size(); ++i) {
219 result[i] = std::vector<int>(start_iter, end_iter);
220 start_iter = end_iter;
221 end_iter += recv_counts[i];
222 }
223 // Return result
224 return result;
225 } else {
226 // Step 1 (see above)
227 int sendcount = source.size();
228 MPI_Gather(&sendcount, 1, MPI_INT, 0, 0, MPI_INT, root, this->get_comm());
229
230 // Step 2 (see above)
231 MPI_Gatherv(source.data(), sendcount, MPI_INT, 0, 0, 0, MPI_INT, root, this->get_comm());
232
233 // Return empty vector, as only the root rank will receive incoming data.
234 return {};
235 }
236 }
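A usage sketch: each rank sends a vector of a different length, and the root receives one vector per rank (non-root ranks get an empty outer vector, as shown above):

std::vector<int> local(context.get_rank() + 1, context.get_rank());
std::vector<std::vector<int>> per_rank = context.gather(local, context.get_root());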

◆ gather_recieve() [1/3]

template<size_t n>
std::vector< std::array< int, n > > nimble::mpicontext::gather_recieve ( const std::array< int, n > & arr) const
inline
379 {
380 std::vector<std::array<int, n>> buffer(get_size());
381 MPI_Gather(
382 arr.data(), /* data being sent */
383 n, /* amount of data being sent */
384 MPI_INT, /* Send type */
385 buffer.data(), /* buffer to place received data */
386 n, /* spacing with which to insert data from buffer */
387 MPI_INT, /* Receive type */
388 get_root(), /* rank that acts as root */
389 get_comm() /* MPI Comm to use */
390 );
391 return buffer;
392 }

◆ gather_recieve() [2/3]

std::vector< int > nimble::mpicontext::gather_recieve ( int * data,
int size ) const
inline
409 {
410 std::vector<int> buffer(get_size() * size);
411 MPI_Gather(
412 data, /* data being sent */
413 size, /* amount of data being sent */
414 MPI_INT, /* Send type */
415 buffer.data(), /* buffer to place received data */
416 size, /* spacing with which to insert data from buffer */
417 MPI_INT, /* Receive type */
418 get_root(), /* rank that acts as root */
419 get_comm()); /* MPI Comm to use */
420 return buffer;
421 }

◆ gather_recieve() [3/3]

std::vector< int > nimble::mpicontext::gather_recieve ( int value) const
inline
347 {
348 std::vector<int> buffer(get_size());
349 MPI_Gather(
350 &value, /* data being sent */
351 1, /* amount of data being sent */
352 MPI_INT, /* Send type */
353 buffer.data(), /* buffer to place received data */
354 1, /* spacing with which to insert data from buffer */
355 MPI_INT, /* Receive type */
356 get_root(), /* rank that acts as root */
357 get_comm() /* MPI Comm to use */
358 );
359 return buffer;
360 }

◆ gather_send() [1/3]

template<size_t n>
void nimble::mpicontext::gather_send ( const std::array< int, n > & arr) const
inline
364 {
365 MPI_Gather(
366 arr.data(), /* data being sent */
367 n, /* amount of data being sent */
368 MPI_INT, /* Send type */
369 nullptr, /* buffer to place received data */
370 n, /* spacing with which to insert data from buffer */
371 MPI_INT, /* Receive type */
372 get_root(), /* rank that acts as root */
373 get_comm() /* MPI Comm to use */
374 );
375 }

◆ gather_send() [2/3]

void nimble::mpicontext::gather_send ( int * data,
int size ) const
inline
395 {
396 MPI_Gather(
397 data, /* data being sent */
398 size, /* amount of data being sent */
399 MPI_INT, /* Send type */
400 nullptr, /* buffer to place received data */
401 size, /* spacing with which to insert data from buffer */
402 MPI_INT, /* Receive type */
403 get_root(), /* rank that acts as root */
404 get_comm() /* MPI Comm to use */
405 );
406 }

◆ gather_send() [3/3]

void nimble::mpicontext::gather_send ( int value) const
inline
333 {
334 MPI_Gather(
335 &value, /* data being sent */
336 1, /* amount of data being sent */
337 MPI_INT, /* Send type */
338 nullptr, /* buffer to place received data */
339 1, /* spacing with which to insert data from buffer */
340 MPI_INT, /* Receive type */
341 get_root(), /* rank that acts as root */
342 get_comm() /* MPI Comm to use */
343 );
344 }
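A usage sketch pairing gather_send with gather_recieve: the root collects one int from every rank (including its own), while the other ranks only send. The variable local_count is illustrative:

int local_count = 42;
if (context.is_root()) {
  std::vector<int> counts = context.gather_recieve(local_count);  // one entry per rank
} else {
  context.gather_send(local_count);
}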

◆ gatherv_recieve()

void nimble::mpicontext::gatherv_recieve ( const std::vector< int > & ints,
std::vector< int > & dest,
const std::vector< int > & counts,
const std::vector< int > & displacements ) const
inline
586 {
587 // See: https://www.open-mpi.org/doc/v2.1/man3/MPI_Gatherv.3.php
588 MPI_Gatherv(
589 ints.data(), /* data being sent */
590 ints.size(), /* amount of data being sent */
591 MPI_INT, /* type of data being sent */
592 dest.data(), /* destination of data being sent */
593 counts.data(), /* expected amount of data to be received from each rank */
594 displacements.data(), /* displacements of received data */
595 MPI_INT, /* type of data being received */
596 this->get_root(), /* root */
597 this->get_comm()); /* MPI Comm to use */
598 }

◆ gatherv_send()

void nimble::mpicontext::gatherv_send ( const std::vector< int > & ints) const
inline
567 {
568 // See: https://www.open-mpi.org/doc/v2.1/man3/MPI_Gatherv.3.php
569 MPI_Gatherv(
570 ints.data(), /* data being sent */
571 ints.size(), /* amount of data being sent */
572 MPI_INT, /* type of data being sent */
573 nullptr, /* destination of data being sent */
574 nullptr, /* expected amount of data to be received from each rank */
575 nullptr, /* displacements of received data */
576 MPI_INT, /* type of data being received */
577 get_root(), /* root */
578 get_comm()); /* MPI Comm to use */
579 }
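A usage sketch pairing gatherv_send with gatherv_recieve; it assumes the root has already obtained per-rank counts (e.g. via gather_recieve) and computed displacements. The names local_ints, counts, displacements, and total are illustrative:

if (context.is_root()) {
  std::vector<int> dest(total);  // total = sum of counts
  context.gatherv_recieve(local_ints, dest, counts, displacements);
} else {
  context.gatherv_send(local_ints);
}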

◆ get_comm()

const MPI_Comm & nimble::mpicontext::get_comm ( ) const
inline
86 {
87 return comm;
88 }

◆ get_rank()

int nimble::mpicontext::get_rank ( ) const
inline
96 {
97 return rank;
98 }

◆ get_root()

int nimble::mpicontext::get_root ( ) const
inline
109 {
110 return 0;
111 }

◆ get_size()

int nimble::mpicontext::get_size ( ) const
inline
91 {
92 return size;
93 }

◆ is_root()

bool nimble::mpicontext::is_root ( ) const
inline
114 {
115 return rank == get_root();
116 }

◆ operator=() [1/2]

mpicontext & nimble::mpicontext::operator= ( const mpicontext & other)
default

◆ operator=() [2/2]

mpicontext & nimble::mpicontext::operator= ( mpicontext && other)
default

◆ print()

const mpicontext & nimble::mpicontext::print ( const std::string & s,
std::ostream & os = std::cout ) const
inline
527 {
528 std::string str = catenate_gatherv(s);
529 if (is_root()) { os << str; }
530 return *this;
531 }
std::string catenate_gatherv(const std::string &str) const
Definition nimble.mpi.mpicontext.h:436
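A usage sketch: every rank must call print (it is a collective built on catenate_gatherv); the strings are gathered in rank order and emitted once by the root:

context.print("rank " + std::to_string(context.get_rank()) + " done\n");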

◆ print_formatted()

const mpicontext & nimble::mpicontext::print_formatted ( const std::string & s,
const std::string & _start,
const std::string & separator,
const std::string & _end,
std::ostream & os = std::cout ) const
inline
539 {
540 std::string str = catenate_gatherv_format(s, _start, separator, _end);
541 if (is_root()) { os << str; }
542 return *this;
543 }
std::string catenate_gatherv_format(const std::string &str, const std::string &_start, const std::string &separator, const std::string &_end) const
Definition nimble.mpi.mpicontext.h:473

◆ print_if_root()

template<class T>
const mpicontext & nimble::mpicontext::print_if_root ( const T & s,
std::ostream & os = std::cout ) const
inline
554 {
555 if (is_root()) { os << s; }
556 return *this;
557 }

◆ println()

const mpicontext & nimble::mpicontext::println ( const std::string & s,
std::ostream & os = std::cout ) const
inline
546 {
547 std::string str = catenate_gatherv(s + "\n");
548 if (is_root()) { os << str; }
549 return *this;
550 }

◆ println_if_root()

template<class T>
const mpicontext & nimble::mpicontext::println_if_root ( const T & s,
std::ostream & os = std::cout ) const
inline
561 {
562 if (is_root()) { os << s << std::endl; }
563 return *this;
564 }

◆ recv() [1/2]

int nimble::mpicontext::recv ( int * data,
int rank,
int tag,
int max_buffer_size ) const
inline
146 {
147 MPI_Status recv_status;
148 MPI_Recv(data, max_buffer_size, MPI_INT, rank, tag, this->comm, &recv_status);
149 int count;
150 MPI_Get_count(&recv_status, MPI_INT, &count);
151 return count;
152 }

◆ recv() [2/2]

int nimble::mpicontext::recv ( std::vector< int > & data,
int rank,
int tag ) const
inline
138 {
139 int count = recv_count(rank, tag);
140 data.resize(count);
141 MPI_Recv(data.data(), count, MPI_INT, rank, tag, this->comm, MPI_STATUS_IGNORE);
142 return count;
143 }
int recv_count(int rank, int tag) const
Definition nimble.mpi.mpicontext.h:128
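A usage sketch pairing send with the vector overload of recv; rank 1 sends to rank 0, which probes the message length and resizes its buffer accordingly:

std::vector<int> payload{1, 2, 3};
if (context.get_rank() == 1) {
  context.send(payload, /*rank*/ 0, /*tag*/ 0);
} else if (context.get_rank() == 0) {
  std::vector<int> incoming;
  int count = context.recv(incoming, /*rank*/ 1, /*tag*/ 0);  // count == incoming.size()
}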

◆ recv_avoid_resize()

int nimble::mpicontext::recv_avoid_resize ( std::vector< int > & data,
int rank,
int tag ) const
inline
157 {
158 int count = recv_count(rank, tag);
159 if (count > data.size()) data.resize(count);
160 MPI_Recv(data.data(), count, MPI_INT, rank, tag, this->comm, MPI_STATUS_IGNORE);
161 return count;
162 }

◆ recv_count()

int nimble::mpicontext::recv_count ( int rank,
int tag ) const
inline
129 {
130 int count = 0;
131 MPI_Status recv_status;
132 MPI_Probe(rank, tag, this->comm, &recv_status);
133 MPI_Get_count(&recv_status, MPI_INT, &count);
134 return count;
135 }

◆ recvpacked()

template<class T, class podT>
void nimble::mpicontext::recvpacked ( T & dest,
int rank,
int tag,
std::vector< podT > & buffer ) const
inline
173 {
174 recv_avoid_resize(buffer, rank, tag);
175 serialization::unpack(dest, buffer);
176 }
int recv_avoid_resize(std::vector< int > &data, int rank, int tag) const
Definition nimble.mpi.mpicontext.h:156
void unpack(T &object, const std::vector< dataT > &vect)
Definition nimble.mpi.serialization.h:325
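A usage sketch pairing sendpacked with recvpacked through a caller-provided buffer; my_object is an illustrative object of a type supported by nimble::serialization::pack/unpack:

std::vector<int> buffer;
if (context.get_rank() == 1) {
  context.sendpacked(my_object, /*rank*/ 0, /*tag*/ 7, buffer);
} else if (context.get_rank() == 0) {
  context.recvpacked(my_object, /*rank*/ 1, /*tag*/ 7, buffer);
}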

◆ scatter()

std::vector< int > nimble::mpicontext::scatter ( int root,
const std::vector< std::vector< int > > & source ) const
inline
239 {
240 if (this->get_rank() == root) {
241 // Step 1: notify each rank of how much data they should expect to receive
242 int recvcount;
243 std::vector<int> send_counts(this->get_size());
244 for (size_t i = 0; i < send_counts.size(); ++i) { send_counts[i] = source[i].size(); }
245 MPI_Scatter(send_counts.data(), 1, MPI_INT, &recvcount, 1, MPI_INT, root, this->get_comm());
246
247 // Step 2: send out data to each of the ranks.
248 std::vector<int> destination(recvcount, 0);
249 std::vector<int> displacements(this->get_size(), 0);
250 std::vector<int> source_made_contiguous(std::accumulate(send_counts.begin(), send_counts.end(), 0), 0);
251
252 std::partial_sum(send_counts.begin(), send_counts.end() - 1, displacements.data() + 1);
253 MPI_Scatterv(
254 source.data(),
255 send_counts.data(),
256 displacements.data(),
257 MPI_INT,
258 destination.data(),
259 recvcount,
260 MPI_INT,
261 root,
262 this->get_comm());
263 } else {
264 int recvcount;
265 MPI_Scatter(0, 0, MPI_INT, &recvcount, 1, MPI_INT, root, this->get_comm());
266 std::vector<int> recvbuff(recvcount);
267 MPI_Scatterv(0, 0, 0, MPI_INT, recvbuff.data(), recvcount, MPI_INT, root, this->get_comm());
268 return recvbuff;
269 }
270 return std::vector<int>();
271 }
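A usage sketch: the root supplies one vector per rank; each non-root rank receives its assigned piece (per the listing above, the root branch returns an empty vector):

std::vector<std::vector<int>> pieces;  // only inspected on the root
if (context.is_root()) { pieces.assign(context.get_size(), {7, 8, 9}); }
std::vector<int> mine = context.scatter(context.get_root(), pieces);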

◆ scatterv_recieve()

void nimble::mpicontext::scatterv_recieve ( std::vector< int > & dest) const
inline
621 {
622 // See: https://www.open-mpi.org/doc/v2.1/man3/MPI_Scatterv.3.php
623 MPI_Scatterv(
624 nullptr, /* data being sent */
625 nullptr, /* amount of data being sent to each rank */
626 nullptr, /* displacements of data being sent */
627 MPI_INT, /* type of data being sent */
628 dest.data(), /* place to put sent data */
629 dest.size(), /* amount of data to receive */
630 MPI_INT, /* type of data being received */
631 this->get_root(), /* root sending the data */
632 this->get_comm()); /*communicator */
633 }

◆ scatterv_send()

void nimble::mpicontext::scatterv_send ( const std::vector< int > & ints,
const std::vector< int > & counts,
const std::vector< int > & displacements,
std::vector< int > & dest ) const
inline
606 {
607 // See: https://www.open-mpi.org/doc/v2.1/man3/MPI_Scatterv.3.php
608 MPI_Scatterv(
609 ints.data(), /* data being sent */
610 counts.data(), /* amount of data being sent to each rank */
611 displacements.data(), /* displacements of data being sent */
612 MPI_INT, /* type of data being sent */
613 dest.data(), /* place to put sent data */
614 dest.size(), /* amount of data to receive */
615 MPI_INT, /* type of data being received */
616 this->get_root(), /* root sending the data */
617 this->get_comm()); /*communicator */
618 }
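A usage sketch pairing scatterv_send with scatterv_recieve; counts and displacements describe how the root's flat buffer is split, and each receiver sizes its destination to the count it expects. The names flat, counts, displacements, root_part, and my_count are illustrative and assumed to be set up beforehand:

if (context.is_root()) {
  context.scatterv_send(flat, counts, displacements, root_part);  // root_part receives the root's own slice
} else {
  std::vector<int> my_part(my_count);
  context.scatterv_recieve(my_part);
}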

◆ send() [1/2]

void nimble::mpicontext::send ( const std::vector< int > & data,
int rank,
int tag ) const
inline
119 {
120 MPI_Send(data.data(), data.size(), MPI_INT, rank, tag, this->comm);
121 }

◆ send() [2/2]

void nimble::mpicontext::send ( std::pair< int *, int > data,
int rank,
int tag ) const
inline
124 {
125 MPI_Send(data.first, data.second, MPI_INT, rank, tag, this->comm);
126 }

◆ sendpacked() [1/2]

template<class T>
void nimble::mpicontext::sendpacked ( const T & source,
int rank,
int tag ) const
inline
182 {
183 int sendcount = serialization::pack_avoid_resize(source, serialization_buffer);
184 send({serialization_buffer.data(), sendcount}, rank, tag);
185 }
void send(const std::vector< int > &data, int rank, int tag) const
Definition nimble.mpi.mpicontext.h:118
size_t pack_avoid_resize(const T &object, std::vector< dataT > &vect)
Definition nimble.mpi.serialization.h:313

◆ sendpacked() [2/2]

template<class T, class podT>
void nimble::mpicontext::sendpacked ( const T & source,
int rank,
int tag,
std::vector< podT > & buffer ) const
inline
166 {
167 serialization::pack_avoid_resize(source, buffer);
168 send(buffer, rank, tag);
169 }

◆ split_by_color() [1/3]

MPI_Comm nimble::mpicontext::split_by_color ( ) const
inline
648 {
649 return split_by_color(MPI_UNDEFINED);
650 }
MPI_Comm split_by_color() const
Definition nimble.mpi.mpicontext.h:647

◆ split_by_color() [2/3]

MPI_Comm nimble::mpicontext::split_by_color ( int color) const
inline
636 {
637 return split_by_color(color, this->get_rank());
638 }

◆ split_by_color() [3/3]

MPI_Comm nimble::mpicontext::split_by_color ( int color,
int key ) const
inline
641 {
642 MPI_Comm new_comm;
643 MPI_Comm_split(get_comm(), color, key, &new_comm);
644 return new_comm;
645 }
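A usage sketch: split the ranks into two sub-communicators by parity; with the single-argument overload, ordering inside each new communicator follows the original rank:

MPI_Comm half = context.split_by_color(context.get_rank() % 2);
nimble::mpicontext sub(half);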

The documentation for this class was generated from the following file:
nimble.mpi.mpicontext.h