NimbleSM
NimbleSM is a solid mechanics simulation code for dynamic systems
Loading...
Searching...
No Matches
nimble::ReductionClique_t Struct Reference

#include <nimble.mpi.rank_clique_reducer.h>

Public Member Functions

 ReductionClique_t ()
 
 ReductionClique_t (const std::vector< int > &index_list, int buffersize, int comm_color, const mpicontext &context)
 
 ReductionClique_t (const std::vector< int > &index_list, int buffersize, MPI_Comm clique_comm)
 
 ReductionClique_t (ReductionClique_t &&source)=default
 
ReductionClique_t & operator= (ReductionClique_t &&source)=default
 
bool okayfieldsizeQ (int field_size)
 
void fitnewfieldsize (int new_field_size)
 
void resizebuffer (int newbuffersize)
 
template<int field_size>
void pack (double *source)
 
template<int field_size>
void unpack (double *dest)
 
template<int field_size, class Lookup>
void pack (Lookup &source)
 
template<int field_size, class Lookup>
void unpack (Lookup &dest)
 
void EnsureDataSafety (int field_size)
 
template<int field_size, class Lookup>
void reduce (Lookup &&data)
 
template<int field_size, class Lookup>
void asyncreduce_initialize (Lookup &&databuffer)
 
bool Iallreduce_completedQ ()
 
template<int field_size, class Lookup>
bool asyncreduce_finalize (Lookup &&databuffer)
 
int GetNumIndices ()
 
int const * GetIndices ()
 
std::vector< int > GetMPICommWorldRanks ()
 

Public Attributes

std::unique_ptr< int[]> indices
 
std::unique_ptr< double[]> sendbuffer
 
std::unique_ptr< double[]> recvbuffer
 
int buffersize
 
int n_indices
 
MPI_Comm clique_comm
 
MPI_Request Iallreduce_request
 
bool exists_active_asyncreduce_request = false
 

Constructor & Destructor Documentation

◆ ReductionClique_t() [1/4]

nimble::ReductionClique_t::ReductionClique_t ( )
inline
71{}

◆ ReductionClique_t() [2/4]

nimble::ReductionClique_t::ReductionClique_t ( const std::vector< int > & index_list,
int buffersize,
int comm_color,
const mpicontext & context )
inline
86 : indices{new int[index_list.size()]},
87 sendbuffer{new double[buffersize]},
88 recvbuffer{new double[buffersize]},
89 buffersize{buffersize},
90 n_indices{(int)index_list.size()},
91 // Actual instantiation of clique_comm is performed by MPI_Comm_split
92 // (called in the constructor body below)
93 {
94 std::copy_n(index_list.data(), index_list.size(), indices.get());
95 // printf("rank %d: Splitting %d with comm color %d\n", context.get_rank(),
96 // context.get_comm(), comm_color);
97 MPI_Comm_split(context.get_comm(), comm_color, context.get_rank(), &clique_comm);
98 }
int n_indices
Definition nimble.mpi.rank_clique_reducer.h:66
std::unique_ptr< int[]> indices
Definition nimble.mpi.rank_clique_reducer.h:62
std::unique_ptr< double[]> recvbuffer
Definition nimble.mpi.rank_clique_reducer.h:64
MPI_Comm clique_comm
Definition nimble.mpi.rank_clique_reducer.h:67
std::unique_ptr< double[]> sendbuffer
Definition nimble.mpi.rank_clique_reducer.h:63
int buffersize
Definition nimble.mpi.rank_clique_reducer.h:65

◆ ReductionClique_t() [3/4]

nimble::ReductionClique_t::ReductionClique_t ( const std::vector< int > & index_list,
int buffersize,
MPI_Comm clique_comm )
inline
100 : indices{new int[index_list.size()]},
101 sendbuffer{new double[buffersize]},
102 recvbuffer{new double[buffersize]},
103 buffersize{buffersize},
104 n_indices{(int)index_list.size()},
105 clique_comm{clique_comm}
106 {
107 std::copy_n(index_list.data(), index_list.size(), indices.get());
108 }

◆ ReductionClique_t() [4/4]

nimble::ReductionClique_t::ReductionClique_t ( ReductionClique_t && source)
default

Member Function Documentation

◆ asyncreduce_finalize()

template<int field_size, class Lookup>
bool nimble::ReductionClique_t::asyncreduce_finalize ( Lookup && databuffer)
inline
244 {
245 if (exists_active_asyncreduce_request) {
246 if (Iallreduce_completedQ()) {
247 unpack<field_size>(databuffer);
248 exists_active_asyncreduce_request = false;
249 return true;
250 } else {
251 return false;
252 }
253 } else {
254 NIMBLE_ABORT(
255 "asyncreduce_finalize(data) was called "
256 "without an active asynchronous reduce request.");
257 }
258 }
#define NIMBLE_ABORT(...)
Definition nimble_macros.h:87
bool Iallreduce_completedQ()
Definition nimble.mpi.rank_clique_reducer.h:224
bool exists_active_asyncreduce_request
Definition nimble.mpi.rank_clique_reducer.h:69
void unpack(double *dest)
Definition nimble.mpi.rank_clique_reducer.h:147

◆ asyncreduce_initialize()

template<int field_size, class Lookup>
void nimble::ReductionClique_t::asyncreduce_initialize ( Lookup && databuffer)
inline
200 {
201 if (exists_active_asyncreduce_request) {
202 NIMBLE_ABORT(
203 "asyncreduce_initialize(data) was called "
204 "when an active asynchronous reduce already exists");
205 }
206
207 EnsureDataSafety(field_size);
208 pack<field_size>(databuffer);
209 MPI_Iallreduce(
210 sendbuffer.get(),
211 recvbuffer.get(),
212 n_indices * field_size,
213 MPI_DOUBLE,
214 MPI_SUM,
215 clique_comm,
216 &Iallreduce_request);
217 exists_active_asyncreduce_request = true;
218 }
MPI_Request Iallreduce_request
Definition nimble.mpi.rank_clique_reducer.h:68
void EnsureDataSafety(int field_size)
Definition nimble.mpi.rank_clique_reducer.h:180
void pack(double *source)
Definition nimble.mpi.rank_clique_reducer.h:132

◆ EnsureDataSafety()

void nimble::ReductionClique_t::EnsureDataSafety ( int field_size)
inline
181 {
182 if (!okayfieldsizeQ(field_size)) { throw std::invalid_argument("Field size too big"); }
183 }
bool okayfieldsizeQ(int field_size)
Definition nimble.mpi.rank_clique_reducer.h:113

◆ fitnewfieldsize()

void nimble::ReductionClique_t::fitnewfieldsize ( int new_field_size)
inline
119 {
120 resizebuffer(n_indices * new_field_size);
121 }
void resizebuffer(int newbuffersize)
Definition nimble.mpi.rank_clique_reducer.h:123

◆ GetIndices()

int const * nimble::ReductionClique_t::GetIndices ( )
inline
266 {
267 return indices.get();
268 }

◆ GetMPICommWorldRanks()

std::vector< int > nimble::ReductionClique_t::GetMPICommWorldRanks ( )
inline
271 {
272 int my_mpi_comm_world_rank;
273 MPI_Comm_rank(MPI_COMM_WORLD, &my_mpi_comm_world_rank);
274 int clique_comm_size;
275 MPI_Comm_size(clique_comm, &clique_comm_size);
276 std::vector<int> mpi_comm_world_ranks(clique_comm_size);
277 MPI_Allgather(&my_mpi_comm_world_rank, 1, MPI_INT, mpi_comm_world_ranks.data(), 1, MPI_INT, clique_comm);
278 std::sort(mpi_comm_world_ranks.begin(), mpi_comm_world_ranks.end());
279 return mpi_comm_world_ranks;
280 }

◆ GetNumIndices()

int nimble::ReductionClique_t::GetNumIndices ( )
inline
261 {
262 return n_indices;
263 }

◆ Iallreduce_completedQ()

bool nimble::ReductionClique_t::Iallreduce_completedQ ( )
inline
225 {
226 if (exists_active_asyncreduce_request) {
227 int flag = 0;
228 MPI_Test(&Iallreduce_request, &flag, MPI_STATUS_IGNORE);
229 // flag is set to true if the allreduce has completed
230 return flag != 0;
231 } else {
232 NIMBLE_ABORT(
233 "Iallreduce_completedQ() was called "
234 "without an active asynchronous reduce request.");
235 }
236 }

◆ okayfieldsizeQ()

bool nimble::ReductionClique_t::okayfieldsizeQ ( int field_size)
inline
114 {
115 return field_size * n_indices <= buffersize;
116 }

◆ operator=()

ReductionClique_t & nimble::ReductionClique_t::operator= ( ReductionClique_t && source)
default

◆ pack() [1/2]

template<int field_size>
void nimble::ReductionClique_t::pack ( double * source)
inline
133 {
134 double* destscan = sendbuffer.get();
135 int * index_ptr = indices.get(), *index_ptr_end = index_ptr + n_indices;
136 for (; index_ptr < index_ptr_end; ++index_ptr) {
137 double* sourcescan = source + (*index_ptr) * field_size;
138 for (int j = 0; j < field_size; ++j) {
139 *destscan = *sourcescan;
140 ++destscan;
141 ++sourcescan;
142 }
143 }
144 }

◆ pack() [2/2]

template<int field_size, class Lookup>
void nimble::ReductionClique_t::pack ( Lookup & source)
inline
163 {
164 double* destscan = sendbuffer.get();
165
166 for (int index : quanta::arrayview_t<int>(indices.get(), n_indices)) {
167 for (int j = 0; j < field_size; ++j) { *destscan++ = source(index, j); }
168 }
169 }

◆ reduce()

template<int field_size, class Lookup>
void nimble::ReductionClique_t::reduce ( Lookup && data)
inline
188 {
189 EnsureDataSafety(field_size);
190 pack<field_size>(data);
191 MPI_Allreduce(sendbuffer.get(), recvbuffer.get(), n_indices * field_size, MPI_DOUBLE, MPI_SUM, clique_comm);
192 unpack<field_size>(data);
193 }

◆ resizebuffer()

void nimble::ReductionClique_t::resizebuffer ( int newbuffersize)
inline
124 {
125 sendbuffer.reset(new double[newbuffersize]);
126 recvbuffer.reset(new double[newbuffersize]);
127 buffersize = newbuffersize;
128 }

◆ unpack() [1/2]

template<int field_size>
void nimble::ReductionClique_t::unpack ( double * dest)
inline
148 {
149 double* sourcescan = recvbuffer.get();
150 int * index_ptr = indices.get(), *index_ptr_end = index_ptr + n_indices;
151 for (; index_ptr < index_ptr_end; ++index_ptr) {
152 double* destscan = dest + (*index_ptr) * field_size;
153 for (int j = 0; j < field_size; ++j) {
154 *destscan = *sourcescan;
155 ++destscan;
156 ++sourcescan;
157 }
158 }
159 }

◆ unpack() [2/2]

template<int field_size, class Lookup>
void nimble::ReductionClique_t::unpack ( Lookup & dest)
inline
173 {
174 double* sourcescan = recvbuffer.get();
175 for (int index : quanta::arrayview_t<int>(indices.get(), n_indices)) {
176 for (int j = 0; j < field_size; ++j) { dest(index, j) = *sourcescan++; }
177 }
178 }

Member Data Documentation

◆ buffersize

int nimble::ReductionClique_t::buffersize

◆ clique_comm

MPI_Comm nimble::ReductionClique_t::clique_comm

◆ exists_active_asyncreduce_request

bool nimble::ReductionClique_t::exists_active_asyncreduce_request = false

◆ Iallreduce_request

MPI_Request nimble::ReductionClique_t::Iallreduce_request

◆ indices

std::unique_ptr<int[]> nimble::ReductionClique_t::indices

◆ n_indices

int nimble::ReductionClique_t::n_indices

◆ recvbuffer

std::unique_ptr<double[]> nimble::ReductionClique_t::recvbuffer

◆ sendbuffer

std::unique_ptr<double[]> nimble::ReductionClique_t::sendbuffer

The documentation for this struct was generated from the following file: