  {  // per-element work: gather, constitutive update, scatter, force assembly
    int num_node_per_elem = element_->NumNodesPerElement();
    int num_int_pt_per_elem = element_->NumIntegrationPointsPerElement();

    // Component counts for nodal vectors and integration-point tensors; these
    // declarations (and the dim variable) are assumed here, based on the
    // LengthToInt() data utility.
    int dim = element_->Dim();  // assumed accessor for the spatial dimension
    int vector_size = LengthToInt(VECTOR, dim);
    int full_tensor_size = LengthToInt(FULL_TENSOR, dim);
    int sym_tensor_size = LengthToInt(SYMMETRIC_TENSOR, dim);

    double ref_coord[vector_size * num_node_per_elem];
    double cur_coord[vector_size * num_node_per_elem];
    double def_grad_n[full_tensor_size * num_int_pt_per_elem];
    double def_grad_np1[full_tensor_size * num_int_pt_per_elem];
    double cauchy_stress_n[sym_tensor_size * num_int_pt_per_elem];
    double cauchy_stress_np1[sym_tensor_size * num_int_pt_per_elem];
    double force[vector_size * num_node_per_elem];

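    // Scratch arrays for material state variables, allocated only when the
    // material model carries internal state.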
    double* state_data_n = nullptr;
    double* state_data_np1 = nullptr;
    std::vector<double> state_data_n_vec;
    std::vector<double> state_data_np1_vec;
    int num_state_data = material_->NumStateVariables();
    if (num_state_data > 0) {
      state_data_n_vec.resize(num_state_data * num_int_pt_per_elem);
      state_data_np1_vec.resize(num_state_data * num_int_pt_per_elem);
      state_data_n = state_data_n_vec.data();
      state_data_np1 = state_data_np1_vec.data();
    }

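    // Gather the reference and current nodal coordinates for this element via
    // the connectivity array.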
    for (int node = 0; node < num_node_per_elem; node++) {
      int node_id = elem_conn[elem * num_node_per_elem + node];
      for (int i = 0; i < vector_size; i++) {
        // Assumption: nodal positions come from global reference_coordinates and
        // displacement arrays; the original right-hand sides are elided here.
        ref_coord[node * vector_size + i] =
            reference_coordinates[vector_size * node_id + i];
        cur_coord[node * vector_size + i] =
            reference_coordinates[vector_size * node_id + i] +
            displacement[vector_size * node_id + i];
      }
    }

    element_->ComputeDeformationGradients(ref_coord, cur_coord, def_grad_np1);

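    // Load the step-n deformation gradients, Cauchy stresses, and material state
    // for this element; my_elem_data_n and my_elem_data_np1 are assumed to point
    // at this element's entries in the step-n and step-n+1 element data arrays.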
    for (int i_ipt = 0; i_ipt < num_int_pt_per_elem; i_ipt++) {
      for (int i_component = 0; i_component < full_tensor_size; i_component++) {
        int def_grad_offset = def_grad_offset_.at(i_ipt * full_tensor_size + i_component);
        def_grad_n[i_ipt * full_tensor_size + i_component] = my_elem_data_n[def_grad_offset];
      }
      for (int i_component = 0; i_component < sym_tensor_size; i_component++) {
        int stress_offset = stress_offset_.at(i_ipt * sym_tensor_size + i_component);
        cauchy_stress_n[i_ipt * sym_tensor_size + i_component] = my_elem_data_n[stress_offset];
      }
      for (int i_component = 0; i_component < num_state_data; i_component++) {
        // Assumption: state variables are indexed through a state_data_offset_ map,
        // mirroring def_grad_offset_ and stress_offset_ above.
        int state_data_offset = state_data_offset_.at(i_ipt * num_state_data + i_component);
        state_data_n[i_ipt * num_state_data + i_component] = my_elem_data_n[state_data_offset];
      }
    }
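
    // Evaluate the material model at each integration point, advancing the
    // Cauchy stress and state variables from step n to step n+1.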
    // The GetStress method name and the arguments marked /* ... */ are
    // assumptions; the corresponding lines are elided in this listing.
    material_->GetStress(/* ... */
                         num_int_pt_per_elem,
                         /* ... */
                         def_grad_n,
                         def_grad_np1,
                         cauchy_stress_n,
                         cauchy_stress_np1,
                         state_data_n,
                         state_data_np1,
                         /* ... */);

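    // Scatter the updated deformation gradients, stresses, and state variables
    // back into the step-n+1 element data.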
    for (int i_ipt = 0; i_ipt < num_int_pt_per_elem; i_ipt++) {
      for (int i_component = 0; i_component < full_tensor_size; i_component++) {
        int def_grad_offset = def_grad_offset_.at(i_ipt * full_tensor_size + i_component);
        my_elem_data_np1[def_grad_offset] = def_grad_np1[i_ipt * full_tensor_size + i_component];
      }
      for (int i_component = 0; i_component < sym_tensor_size; i_component++) {
        int stress_offset = stress_offset_.at(i_ipt * sym_tensor_size + i_component);
        my_elem_data_np1[stress_offset] = cauchy_stress_np1[i_ipt * sym_tensor_size + i_component];
      }
      for (int i_component = 0; i_component < num_state_data; i_component++) {
        // Assumption: same state_data_offset_ map as in the gather loop above.
        int state_data_offset = state_data_offset_.at(i_ipt * num_state_data + i_component);
        my_elem_data_np1[state_data_offset] = state_data_np1[i_ipt * num_state_data + i_component];
      }
    }

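    // Compute this element's nodal force contributions from the current
    // coordinates and the updated Cauchy stress.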
    element_->ComputeNodalForces(cur_coord, cauchy_stress_np1, force);

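    // Assemble the element forces into the global internal force vector.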
    for (int node = 0; node < num_node_per_elem; node++) {
      int node_id = elem_conn[elem * num_node_per_elem + node];
      for (int i = 0; i < vector_size; i++) {
#ifdef NIMBLE_HAVE_KOKKOS
        // Atomic update: under Kokkos threading, multiple elements may add to
        // the same nodal entry concurrently.
        Kokkos::atomic_add(&internal_force[vector_size * node_id + i],
                           force[node * vector_size + i]);
#else
        internal_force[vector_size * node_id + i] += force[node * vector_size + i];
#endif
      }
    }
  }
}