ESPResSo
Extensible Simulation Package for Research on Soft Matter Systems
RegularDecomposition.cpp
/*
 * Copyright (C) 2010-2022 The ESPResSo project
 * Copyright (C) 2002,2003,2004,2005,2006,2007,2008,2009,2010
 *   Max-Planck-Institute for Polymer Research, Theory Group
 *
 * This file is part of ESPResSo.
 *
 * ESPResSo is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * ESPResSo is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "cell_system/RegularDecomposition.hpp"

#include "cell_system/Cell.hpp"

#include "communication.hpp"
#include "errorhandling.hpp"
#include "ghosts.hpp"
#include "system/System.hpp"

#include <utils/Vector.hpp>
#include <utils/index.hpp>
#include <utils/mpi/cart_comm.hpp>
#include <utils/mpi/sendrecv.hpp>

#include <boost/container/flat_set.hpp>
#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/range/algorithm/reverse.hpp>
#include <boost/range/numeric.hpp>

#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <stdexcept>
#include <type_traits>
#include <utility>
#include <vector>

int RegularDecomposition::position_to_cell_index(
    Utils::Vector3d const &pos) const {
  Utils::Vector3i cpos;

  for (auto i = 0u; i < 3u; i++) {
    cpos[i] = static_cast<int>(std::floor(pos[i] * inv_cell_size[i])) + 1 -
              cell_offset[i];

    /* particles outside our box. Still take them if
       nonperiodic boundary. We also accept the particle if we are at
       the box boundary, and the particle is within the box. In this case
       the particle belongs here and could otherwise potentially be dismissed
       due to rounding errors. */
    if (cpos[i] < 1) {
      if ((!m_box.periodic(i) or (pos[i] >= m_box.length()[i])) and
          m_local_box.boundary()[2u * i])
        cpos[i] = 1;
      else
        return -1;
    } else if (cpos[i] > cell_grid[i]) {
      if ((!m_box.periodic(i) or (pos[i] < m_box.length()[i])) and
          m_local_box.boundary()[2u * i + 1u])
        cpos[i] = cell_grid[i];
      else
        return -1;
    }
  }

  return Utils::get_linear_index(cpos, ghost_cell_grid);
}
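
/* A standalone sketch of the mapping above: the position is scaled by the
   inverse cell size, floored, and shifted by +1 to skip the ghost layer.
   The unit cell size, the 4-cell grid, and the absence of open boundaries
   are made-up assumptions for illustration; this snippet compiles on its
   own and is not part of this translation unit. */
#include <cassert>
#include <cmath>

int position_to_cell_index_sketch(double pos) {
  double const inv_cell_size = 1.0; // assumed cell size of 1
  int const cell_grid = 4;          // 4 physical cells in this direction
  auto cpos = static_cast<int>(std::floor(pos * inv_cell_size)) + 1;
  if (cpos < 1 or cpos > cell_grid)
    return -1; // not on this node (no open-boundary special case here)
  return cpos; // 1..4 are physical cells; 0 and 5 would be ghost layers
}

int main() {
  assert(position_to_cell_index_sketch(0.2) == 1);  // first physical cell
  assert(position_to_cell_index_sketch(3.9) == 4);  // last physical cell
  assert(position_to_cell_index_sketch(4.1) == -1); // outside the local box
}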

void RegularDecomposition::move_if_local(
    ParticleList &src, ParticleList &rest,
    std::vector<ParticleChange> &modified_cells) {
  for (auto &part : src) {
    auto target_cell = position_to_cell(part.pos());

    if (target_cell) {
      target_cell->particles().insert(std::move(part));
      modified_cells.emplace_back(ModifiedList{target_cell->particles()});
    } else {
      rest.insert(std::move(part));
    }
  }

  src.clear();
}

void RegularDecomposition::move_left_or_right(ParticleList &src,
                                              ParticleList &left,
                                              ParticleList &right,
                                              int dir) const {
  auto const is_open_boundary_left = m_local_box.boundary()[2 * dir] != 0;
  auto const is_open_boundary_right = m_local_box.boundary()[2 * dir + 1] != 0;
  auto const can_move_left = m_box.periodic(dir) or not is_open_boundary_left;
  auto const can_move_right =
      m_box.periodic(dir) or not is_open_boundary_right;
  auto const my_left = m_local_box.my_left()[dir];
  auto const my_right = m_local_box.my_right()[dir];
  for (auto it = src.begin(); it != src.end();) {
    auto const pos = it->pos()[dir];
    if (m_box.get_mi_coord(pos, my_right, dir) >= 0. and can_move_right) {
      right.insert(std::move(*it));
      it = src.erase(it);
    } else if (m_box.get_mi_coord(pos, my_left, dir) < 0. and can_move_left) {
      left.insert(std::move(*it));
      it = src.erase(it);
    } else {
      ++it;
    }
  }
}
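
/* Sketch of the minimum-image test used above, assuming get_mi_coord(a, b,
   dir) returns the signed difference a - b folded into [-box_l/2, box_l/2].
   A particle right of the local box's right edge yields a non-negative value
   and is handed to the right neighbor. Standalone code with a made-up box
   length; this is not the BoxGeometry implementation itself. */
#include <cassert>
#include <cmath>

double get_mi_coord_sketch(double a, double b, double box_l) {
  auto const d = a - b;
  return d - box_l * std::round(d / box_l); // fold into [-box_l/2, box_l/2]
}

int main() {
  double const box_l = 10.;
  assert(get_mi_coord_sketch(5.2, 5.0, box_l) >= 0.); // just right of edge
  assert(get_mi_coord_sketch(4.8, 5.0, box_l) < 0.);  // left of the edge
  // a particle at 0.5 is "right" of an edge at 9.0 once folded:
  assert(get_mi_coord_sketch(0.5, 9.0, box_l) >= 0.);
}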

void RegularDecomposition::exchange_neighbors(
    ParticleList &pl, std::vector<ParticleChange> &modified_cells) {
  auto const node_neighbors = Utils::Mpi::cart_neighbors<3>(m_comm);
  static ParticleList send_buf_l, send_buf_r, recv_buf_l, recv_buf_r;

  for (int dir = 0; dir < 3; dir++) {
    /* Single node direction, no action needed. */
    if (Utils::Mpi::cart_get<3>(m_comm).dims[dir] == 1) {
      continue;
    }
    /* In this (common) case left and right neighbors are
       the same, and we need only one communication */
    if (Utils::Mpi::cart_get<3>(m_comm).dims[dir] == 2) {
      move_left_or_right(pl, send_buf_l, send_buf_l, dir);

      Utils::Mpi::sendrecv(m_comm, node_neighbors[2 * dir], 0, send_buf_l,
                           node_neighbors[2 * dir], 0, recv_buf_l);

      send_buf_l.clear();
    } else {
      using boost::mpi::request;
      using Utils::Mpi::isendrecv;

      move_left_or_right(pl, send_buf_l, send_buf_r, dir);

      auto req_l = isendrecv(m_comm, node_neighbors[2 * dir], 0, send_buf_l,
                             node_neighbors[2 * dir], 0, recv_buf_l);
      auto req_r = isendrecv(m_comm, node_neighbors[2 * dir + 1], 0, send_buf_r,
                             node_neighbors[2 * dir + 1], 0, recv_buf_r);

      std::array<request, 4> reqs{{req_l[0], req_l[1], req_r[0], req_r[1]}};
      boost::mpi::wait_all(reqs.begin(), reqs.end());

      send_buf_l.clear();
      send_buf_r.clear();
    }

    move_if_local(recv_buf_l, pl, modified_cells);
    move_if_local(recv_buf_r, pl, modified_cells);
  }
}
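
/* The nonblocking exchange above pairs an isend and an irecv per neighbor
   and then waits on all requests at once. A minimal standalone sketch of
   that pattern with boost::mpi, exchanging plain integers with one partner
   rank (run with exactly 2 MPI ranks); the variable names are made up. */
#include <array>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/nonblocking.hpp>
#include <iostream>

int main(int argc, char **argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  int const partner = 1 - world.rank(); // the other of the two ranks
  int send_val = world.rank(), recv_val = -1;

  std::array<boost::mpi::request, 2> reqs{
      {world.isend(partner, 0, send_val), world.irecv(partner, 0, recv_val)}};
  boost::mpi::wait_all(reqs.begin(), reqs.end());

  std::cout << "rank " << world.rank() << " received " << recv_val << "\n";
}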

/**
 * @brief Fold coordinates to box and reset the old position.
 */
static void fold_and_reset(Particle &p, BoxGeometry const &box_geo) {
  box_geo.fold_position(p.pos(), p.image_box());

  p.pos_at_last_verlet_update() = p.pos();
}

void RegularDecomposition::resort(bool global,
                                  std::vector<ParticleChange> &diff) {
  ParticleList displaced_parts;

  for (auto &c : local_cells()) {
    for (auto it = c->particles().begin(); it != c->particles().end();) {
      fold_and_reset(*it, m_box);

      auto target_cell = particle_to_cell(*it);

      /* Particle is in place */
      if (target_cell == c) {
        std::advance(it, 1);
        continue;
      }

      auto p = std::move(*it);
      it = c->particles().erase(it);
      diff.emplace_back(ModifiedList{c->particles()});

      /* Particle is not local */
      if (target_cell == nullptr) {
        diff.emplace_back(RemovedParticle{p.id()});
        displaced_parts.insert(std::move(p));
      }
      /* Particle belongs on this node but is in the wrong cell. */
      else if (target_cell != c) {
        target_cell->particles().insert(std::move(p));
        diff.emplace_back(ModifiedList{target_cell->particles()});
      }
    }
  }

  if (global) {
    auto const grid = Utils::Mpi::cart_get<3>(m_comm).dims;
    /* Worst case we need grid - 1 rounds per direction.
     * This correctly implies that if there is only one node,
     * no action should be taken. */
    int rounds_left = grid[0] + grid[1] + grid[2] - 3;
    for (; rounds_left > 0; rounds_left--) {
      exchange_neighbors(displaced_parts, diff);

      auto left_over = boost::mpi::all_reduce(m_comm, displaced_parts.size(),
                                              std::plus<std::size_t>());

      if (left_over == 0) {
        break;
      }
    }
  } else {
    exchange_neighbors(displaced_parts, diff);
  }

  if (not displaced_parts.empty()) {
    auto sort_cell = local_cells()[0];

    for (auto &part : displaced_parts) {
      runtimeErrorMsg() << "Particle " << part.id() << " moved more "
                        << "than one local box length in one timestep";
      sort_cell->particles().insert(std::move(part));

      diff.emplace_back(ModifiedList{sort_cell->particles()});
    }
  }
}
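
/* Sketch of the termination test above: every rank contributes the number of
   particles it could not yet place, and the loop stops once the global sum
   reaches zero. Standalone boost::mpi example; the local counts are made up. */
#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <cstddef>
#include <functional>
#include <iostream>

int main(int argc, char **argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // pretend only rank 0 still holds one displaced particle
  std::size_t const local_left_over = (world.rank() == 0) ? 1u : 0u;
  auto const left_over =
      boost::mpi::all_reduce(world, local_left_over, std::plus<std::size_t>());

  if (left_over == 0)
    std::cout << "all particles sorted, stop early\n";
  else
    std::cout << left_over << " particle(s) still in flight\n";
}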

void RegularDecomposition::mark_cells() {
  m_local_cells.clear();
  m_ghost_cells.clear();

  int cnt_c = 0;
  for (int o = 0; o < ghost_cell_grid[2]; o++)
    for (int n = 0; n < ghost_cell_grid[1]; n++)
      for (int m = 0; m < ghost_cell_grid[0]; m++) {
        if ((m > 0 && m < ghost_cell_grid[0] - 1 && n > 0 &&
             n < ghost_cell_grid[1] - 1 && o > 0 && o < ghost_cell_grid[2] - 1))
          m_local_cells.push_back(&cells.at(cnt_c++));
        else
          m_ghost_cells.push_back(&cells.at(cnt_c++));
      }
}
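
/* The classification above keeps the innermost (g-2)^3 block as local cells
   and the one-cell-thick frame as ghost cells. A small standalone check of
   the resulting counts for a made-up 4x4x4 ghost grid: */
#include <cassert>

int main() {
  int const g[3] = {4, 4, 4}; // ghost_cell_grid, example value
  int local = 0, ghost = 0;
  for (int o = 0; o < g[2]; o++)
    for (int n = 0; n < g[1]; n++)
      for (int m = 0; m < g[0]; m++) {
        bool const inner = m > 0 && m < g[0] - 1 && n > 0 && n < g[1] - 1 &&
                           o > 0 && o < g[2] - 1;
        (inner ? local : ghost)++;
      }
  assert(local == 2 * 2 * 2);     // (4-2)^3 physical cells
  assert(ghost == 4 * 4 * 4 - 8); // the surrounding frame
}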

void RegularDecomposition::fill_comm_cell_lists(ParticleList **part_lists,
                                                Utils::Vector3i const &lc,
                                                Utils::Vector3i const &hc) {
  for (int o = lc[0]; o <= hc[0]; o++)
    for (int n = lc[1]; n <= hc[1]; n++)
      for (int m = lc[2]; m <= hc[2]; m++) {
        auto const i = Utils::get_linear_index(o, n, m, ghost_cell_grid);

        *part_lists++ = &(cells.at(i).particles());
      }
}
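
/* fill_comm_cell_lists walks an index cuboid [lc, hc] and flattens it with
   get_linear_index. A standalone sketch of that flattening (first coordinate
   fastest), with a made-up 4x4x4 grid; the helper name is hypothetical: */
#include <cassert>

int get_linear_index_sketch(int a, int b, int c, int const adim[3]) {
  return a + adim[0] * (b + adim[1] * c);
}

int main() {
  int const adim[3] = {4, 4, 4};
  assert(get_linear_index_sketch(0, 0, 0, adim) == 0);
  assert(get_linear_index_sketch(1, 0, 0, adim) == 1); // first index fastest
  assert(get_linear_index_sketch(0, 1, 0, adim) == 4);
  assert(get_linear_index_sketch(0, 0, 1, adim) == 16);
}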

Utils::Vector3d RegularDecomposition::max_range() const {
  auto dir_max_range = [this](unsigned int i) {
    return std::min(0.5 * m_box.length()[i], m_local_box.length()[i]);
  };

  return {dir_max_range(0u), dir_max_range(1u), dir_max_range(2u)};
}

int RegularDecomposition::calc_processor_min_num_cells() const {
  /* the minimal number of cells can be lower if there are at least two nodes
     serving a direction, since this also ensures that the cell size is at
     most half the box length. However, if there is only one processor for a
     direction, there have to be at least two cells for this direction. */
  return boost::accumulate(Utils::Mpi::cart_get<3>(m_comm).dims, 1,
                           [](int n_cells, int grid) {
                             return (grid == 1) ? 2 * n_cells : n_cells;
                           });
}
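
/* The fold above doubles the cell-count requirement once per direction that
   is served by a single MPI rank. Standalone check with made-up node grids: */
#include <array>
#include <cassert>
#include <numeric>

int min_num_cells(std::array<int, 3> const &dims) {
  return std::accumulate(dims.begin(), dims.end(), 1, [](int n, int grid) {
    return (grid == 1) ? 2 * n : n;
  });
}

int main() {
  assert(min_num_cells({1, 1, 1}) == 8); // lone rank: 2 cells per direction
  assert(min_num_cells({2, 2, 1}) == 2); // only z served by a single rank
  assert(min_num_cells({2, 2, 2}) == 1); // every direction split at least 2x
}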

void RegularDecomposition::create_cell_grid(double range) {
  auto const cart_info = Utils::Mpi::cart_get<3>(m_comm);

  int n_local_cells;
  auto cell_range = Utils::Vector3d::broadcast(range);
  auto const min_num_cells = calc_processor_min_num_cells();

  if (range <= 0.) {
    /* this is the non-interacting case */
    auto const cells_per_dir =
        static_cast<int>(std::ceil(std::cbrt(min_num_cells)));

    cell_grid = Utils::Vector3i::broadcast(cells_per_dir);
    n_local_cells = Utils::product(cell_grid);
  } else {
    /* Calculate initial cell grid */
    auto const &local_box_l = m_local_box.length();
    auto const volume = Utils::product(local_box_l);
    auto const scale = std::cbrt(RegularDecomposition::max_num_cells / volume);

    for (auto i = 0u; i < 3u; i++) {
      /* this is at least 1 */
      cell_grid[i] = static_cast<int>(std::ceil(local_box_l[i] * scale));
      cell_range[i] = local_box_l[i] / static_cast<double>(cell_grid[i]);

      if (cell_range[i] < range) {
        /* ok, too many cells for this direction, set to minimum */
        cell_grid[i] = static_cast<int>(std::floor(local_box_l[i] / range));
        if (cell_grid[i] < 1) {
          runtimeErrorMsg()
              << "interaction range " << range << " in direction " << i
              << " is larger than the local box size " << local_box_l[i];
          cell_grid[i] = 1;
        }
        cell_range[i] = local_box_l[i] / static_cast<double>(cell_grid[i]);
      }
    }

    /* It may be necessary to asymmetrically assign the scaling to the
       coordinates, which the above approach will not do.
       For a symmetric box, it gives a symmetric result. Here we correct
       that. */
    for (;;) {
      n_local_cells = Utils::product(cell_grid);

      /* done */
      if (n_local_cells <= RegularDecomposition::max_num_cells)
        break;

      /* find coordinate with the smallest cell range */
      auto min_ind = 0u;
      auto min_size = cell_range[0];

      for (auto i = 1u; i < 3u; ++i) {
        if (cell_grid[i] > 1 and cell_range[i] < min_size) {
          min_ind = i;
          min_size = cell_range[i];
        }
      }

      cell_grid[min_ind]--;
      cell_range[min_ind] =
          local_box_l[min_ind] / static_cast<double>(cell_grid[min_ind]);
    }

    /* sanity check */
    if (n_local_cells < min_num_cells) {
      runtimeErrorMsg() << "number of cells " << n_local_cells
                        << " is smaller than minimum " << min_num_cells
                        << ": either interaction range is too large for "
                        << "the current skin (range=" << range << ", "
                        << "half_local_box_l=[" << local_box_l / 2. << "]) "
                        << "or min_num_cells too large";
    }
  }

  if (n_local_cells > RegularDecomposition::max_num_cells) {
    runtimeErrorMsg() << "no suitable cell grid found";
  }

  auto const node_pos = cart_info.coords;

  /* now set all dependent variables */
  int new_cells = 1;
  for (auto i = 0u; i < 3u; i++) {
    ghost_cell_grid[i] = cell_grid[i] + 2;
    new_cells *= ghost_cell_grid[i];
    cell_size[i] = m_local_box.length()[i] / static_cast<double>(cell_grid[i]);
    inv_cell_size[i] = 1.0 / cell_size[i];
    cell_offset[i] = node_pos[i] * cell_grid[i];
  }

  /* allocate cell array and cell pointer arrays */
  cells.clear();
  cells.resize(static_cast<unsigned int>(new_cells));
}
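
/* Worked example of the initial grid heuristic above, for a made-up local
   box of 10 x 10 x 5 and interaction range 1.25. The cell budget of 32768
   stands in for RegularDecomposition::max_num_cells and is only an
   assumption for this sketch: */
#include <cassert>
#include <cmath>

int main() {
  double const box[3] = {10., 10., 5.};
  double const range = 1.25;
  double const max_num_cells = 32768.;

  double const volume = box[0] * box[1] * box[2];
  double const scale = std::cbrt(max_num_cells / volume);

  int grid[3];
  for (int i = 0; i < 3; i++) {
    grid[i] = static_cast<int>(std::ceil(box[i] * scale));
    if (box[i] / grid[i] < range) // cells would be smaller than the range
      grid[i] = static_cast<int>(std::floor(box[i] / range));
  }
  assert(grid[0] == 8 and grid[1] == 8 and grid[2] == 4);
}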

template <class K, class Comparator> auto make_flat_set(Comparator &&comp) {
  return boost::container::flat_set<K, std::remove_reference_t<Comparator>>(
      std::forward<Comparator>(comp));
}

void RegularDecomposition::init_cell_interactions() {

  // Note: the global index for physical cells is 0-based.
  // I.e., a global index of -1 refers to a ghost cell.
  auto const halo = Utils::Vector3i{1, 1, 1}; // number of ghost layers
  auto const cart_info = Utils::Mpi::cart_get<3>(m_comm);
  // 3D index of the MPI rank in the Cartesian grid of MPI ranks
  auto const &node_pos = cart_info.coords;
  // size of the Cartesian grid of MPI ranks
  auto const &node_grid = ::communicator.node_grid;
  // MD cell index of lower halo layer on this MPI rank
  auto const global_halo_offset = hadamard_product(node_pos, cell_grid) - halo;
  // total number of MD cells in the global grid
  auto const global_size = hadamard_product(node_grid, cell_grid);

  // is a cell at the system boundary in the given coordinate?
  auto const at_boundary = [&](int coord, Utils::Vector3i const &cell_idx) {
    return (cell_idx[coord] == 0 or cell_idx[coord] == global_size[coord]);
  };

  // For the fully connected feature (cells that don't share at least a
  // corner), only apply if one cell is a ghost cell (i.e. connections
  // across the periodic boundary).
  auto const fcb_is_inner_connection = [&](Utils::Vector3i const &a,
                                           Utils::Vector3i const &b) {
    if (fully_connected_boundary()) {
      auto const [fc_normal, fc_dir] = *fully_connected_boundary();
      auto const involves_ghost_cell =
          (a[fc_normal] == -1 or a[fc_normal] == global_size[fc_normal] or
           b[fc_normal] == -1 or b[fc_normal] == global_size[fc_normal]);
      if (not involves_ghost_cell) {
        // check if cells do not share at least a corner
        return std::abs((a - b)[fc_dir]) > 1;
      }
    }
    return false;
  };

  /* Translate a node local index (relative to the origin of the local grid)
   * to a global index. */
  auto global_index =
      [&](Utils::Vector3i const &local_index) -> Utils::Vector3i {
    return (global_halo_offset + local_index);
  };

  /* Linear index in the global cell grid. */
  auto folded_linear_index = [&](Utils::Vector3i const &global_index) {
    auto folded_index = global_index;
    for (auto i = 0u; i < 3u; i++) {
      folded_index[i] = (global_index[i] + global_size[i]) % global_size[i];
    }

    return Utils::get_linear_index(folded_index, global_size);
  };

  /* Translate a global index into a local one */
  auto local_index =
      [&](Utils::Vector3i const &global_index) -> Utils::Vector3i {
    return (global_index - global_halo_offset);
  };

  // sanity checks
  if (fully_connected_boundary()) {
    auto const [fc_normal, fc_dir] = *fully_connected_boundary();
    if (fc_normal == fc_dir) {
      throw std::domain_error("fully_connected_boundary normal and connection "
                              "coordinates need to differ.");
    }
    if (node_grid[fc_dir] != 1) {
      throw std::runtime_error(
          "The MPI nodegrid must be 1 in the fully connected direction.");
    }
  }

  /* We only consider local cells (e.g. not halo cells), which
   * span the range [(1,1,1), cell_grid) in local coordinates. */
  auto const start = global_index(Utils::Vector3i{1, 1, 1});
  auto const end = start + cell_grid;

  bool one_mpi_rank = m_comm.size() == 1;

  /* loop all local cells */
  for (int o = start[2]; o < end[2]; o++)
    for (int n = start[1]; n < end[1]; n++)
      for (int m = start[0]; m < end[0]; m++) {
        /* next-nearest neighbors in every direction */
        Utils::Vector3i lower_index = {m - 1, n - 1, o - 1};
        Utils::Vector3i upper_index = {m + 1, n + 1, o + 1};

        /* In the fully connected case, we consider all cells
         * in the direction as neighbors, not only the nearest ones. */
        if (fully_connected_boundary()) {
          auto const [fc_boundary, fc_direction] = *fully_connected_boundary();

          // Fully connected is only needed at the box surface
          if (at_boundary(fc_boundary, {m, n, o})) {
            lower_index[fc_direction] = -1;
            upper_index[fc_direction] = global_size[fc_direction];
          }
        }

        /* In non-periodic directions, the halo needs not
         * be considered. */
        for (auto i = 0u; i < 3u; i++) {
          if (not m_box.periodic(i)) {
            lower_index[i] = std::max(0, lower_index[i]);
            upper_index[i] = std::min(global_size[i] - 1, upper_index[i]);
          }
        }

        /* Unique set of neighbors, cells are compared by their linear
         * index in the global cell grid. */
        auto neighbors = make_flat_set<Utils::Vector3i>(
            [&](Utils::Vector3i const &a, Utils::Vector3i const &b) {
              return folded_linear_index(a) < folded_linear_index(b);
            });

        /* Collect neighbors */
        for (int p = lower_index[2]; p <= upper_index[2]; p++)
          for (int q = lower_index[1]; q <= upper_index[1]; q++)
            for (int r = lower_index[0]; r <= upper_index[0]; r++) {
              if (fully_connected_boundary()) {
                // Avoid fully connecting the boundary layer and the
                // next INNER layer
                if (fcb_is_inner_connection({m, n, o}, {r, q, p}))
                  continue;
              }
              neighbors.insert(Utils::Vector3i{r, q, p});
            }

        /* Red-black partition by global index. */
        auto const ind1 = folded_linear_index({m, n, o});

        std::vector<Cell *> red_neighbors;
        std::vector<Cell *> black_neighbors;

        /* If we are running on a single MPI rank, it is not necessary to use
         * ghost cells. Instead of adding a ghost cell as neighbor,
         * we directly connect to the corresponding
         * physical cell across the periodic boundary */
        for (auto &neighbor : neighbors) {
          if (one_mpi_rank) {
            for (auto coord : {0u, 1u, 2u}) {
              if (neighbor[coord] == -1) {
                neighbor[coord] = cell_grid[coord] - 1;
              } else if (neighbor[coord] == cell_grid[coord]) {
                neighbor[coord] = 0;
              }
            }
          }
          auto const ind2 = folded_linear_index(neighbor);
          /* Exclude cell itself */
          if (ind1 == ind2)
            continue;

          auto cell = &cells.at(
              Utils::get_linear_index(local_index(neighbor), ghost_cell_grid));

          // Divide red and black neighbors
          if (ind2 > ind1) {
            red_neighbors.push_back(cell);
          } else {
            black_neighbors.push_back(cell);
          }
        }

        // Assign neighbors to the cell
        auto const cell_idx =
            Utils::get_linear_index(local_index({m, n, o}), ghost_cell_grid);
        cells.at(cell_idx).m_neighbors =
            Neighbors<Cell *>(red_neighbors, black_neighbors);
      }
}
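
/* Sketch of the red/black split above: ordering neighbor pairs by a global
   linear index makes each unordered pair {ind1, ind2} appear exactly once as
   a "red" (higher-index) neighbor, so pair interactions are visited once
   rather than twice. Standalone illustration on a toy 1D periodic cell list: */
#include <cassert>
#include <utility>
#include <vector>

int main() {
  int const n_cells = 4;
  std::vector<std::pair<int, int>> pairs;
  for (int ind1 = 0; ind1 < n_cells; ind1++) {
    for (int offset : {-1, 1}) { // nearest neighbors with periodic wrap
      int const ind2 = (ind1 + offset + n_cells) % n_cells;
      if (ind2 > ind1) // "red" neighbor: keep; "black": handled by the peer
        pairs.emplace_back(ind1, ind2);
    }
  }
  // each of the 4 distinct neighbor pairs shows up exactly once
  assert(pairs.size() == 4);
}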

namespace {
/** Revert the order of a communicator: After calling this the
 * communicator is working in reverted order with exchanged
 * communication types GHOST_SEND <-> GHOST_RECV.
 */
void revert_comm_order(GhostCommunicator &comm) {
  /* revert order */
  boost::reverse(comm.communications);

  /* exchange SEND/RECV */
  for (auto &c : comm.communications) {
    if (c.type == GHOST_SEND)
      c.type = GHOST_RECV;
    else if (c.type == GHOST_RECV)
      c.type = GHOST_SEND;
    else if (c.type == GHOST_LOCL) {
      boost::reverse(c.part_lists);
    }
  }
}

/** Of every two communication rounds, set the first receivers to prefetch and
 * poststore
 */
void assign_prefetches(GhostCommunicator &comm) {
  for (auto it = comm.communications.begin(); it != comm.communications.end();
       it += 2) {
    auto next = std::next(it);
    if (it->type == GHOST_RECV && next->type == GHOST_SEND) {
      it->type |= GHOST_PREFETCH | GHOST_PSTSTORE;
      next->type |= GHOST_PREFETCH | GHOST_PSTSTORE;
    }
  }
}

} // namespace

GhostCommunicator RegularDecomposition::prepare_comm() {
  // When running on a single MPI rank, the ghost cells are not needed,
  // as the corresponding physical cell is available on the same MPI rank.
  // The cell neighborships are set up such that cells across the periodic
  // boundaries are connected as neighbors directly.
  if (m_comm.size() == 1)
    return {};

  int dir, lr, i, cnt, n_comm_cells[3];
  Utils::Vector3i lc{}, hc{}, done{};

  auto const comm_info = Utils::Mpi::cart_get<3>(m_comm);
  auto const node_neighbors = Utils::Mpi::cart_neighbors<3>(m_comm);

  /* calculate number of communications */
  std::size_t num = 0;
  for (dir = 0; dir < 3; dir++) {
    for (lr = 0; lr < 2; lr++) {
      /* No communication for border of non periodic direction */
      if (comm_info.dims[dir] == 1)
        num++;
      else
        num += 2;
    }
  }

  /* prepare communicator */
  auto ghost_comm = GhostCommunicator{m_comm, num};

  /* number of cells to communicate in a direction */
  n_comm_cells[0] = cell_grid[1] * cell_grid[2];
  n_comm_cells[1] = cell_grid[2] * (cell_grid[0] + 2);
  n_comm_cells[2] = (cell_grid[0] + 2) * (cell_grid[1] + 2);

  cnt = 0;
  /* direction loop: x, y, z */
  for (dir = 0; dir < 3; dir++) {
    lc[(dir + 1) % 3] = 1 - done[(dir + 1) % 3];
    lc[(dir + 2) % 3] = 1 - done[(dir + 2) % 3];
    hc[(dir + 1) % 3] = cell_grid[(dir + 1) % 3] + done[(dir + 1) % 3];
    hc[(dir + 2) % 3] = cell_grid[(dir + 2) % 3] + done[(dir + 2) % 3];
    /* lr loop: left right */
    /* here we could in principle build in a one sided ghost
       communication, simply by taking the lr loop only over one
       value */
    for (lr = 0; lr < 2; lr++) {
      if (comm_info.dims[dir] == 1) {
        /* just copy cells on a single node */
        ghost_comm.communications[cnt].type = GHOST_LOCL;
        ghost_comm.communications[cnt].node = m_comm.rank();

        /* Buffer has to contain Send and Recv cells -> factor 2 */
        ghost_comm.communications[cnt].part_lists.resize(2 * n_comm_cells[dir]);

        /* fill send ghost_comm cells */
        lc[dir] = hc[dir] = 1 + lr * (cell_grid[dir] - 1);

        fill_comm_cell_lists(ghost_comm.communications[cnt].part_lists.data(),
                             lc, hc);

        /* fill recv ghost_comm cells */
        lc[dir] = hc[dir] = 0 + (1 - lr) * (cell_grid[dir] + 1);

        /* place receive cells after send cells */
        fill_comm_cell_lists(
            &ghost_comm.communications[cnt].part_lists[n_comm_cells[dir]], lc,
            hc);

        cnt++;
      } else {
        /* i: send/recv loop */
        for (i = 0; i < 2; i++) {
          if ((comm_info.coords[dir] + i) % 2 == 0) {
            ghost_comm.communications[cnt].type = GHOST_SEND;
            ghost_comm.communications[cnt].node = node_neighbors[2 * dir + lr];
            ghost_comm.communications[cnt].part_lists.resize(n_comm_cells[dir]);
            /* prepare folding of ghost positions */

            lc[dir] = hc[dir] = 1 + lr * (cell_grid[dir] - 1);

            fill_comm_cell_lists(
                ghost_comm.communications[cnt].part_lists.data(), lc, hc);
            cnt++;
          }
          if ((comm_info.coords[dir] + (1 - i)) % 2 == 0) {
            ghost_comm.communications[cnt].type = GHOST_RECV;
            ghost_comm.communications[cnt].node =
                node_neighbors[2 * dir + (1 - lr)];
            ghost_comm.communications[cnt].part_lists.resize(n_comm_cells[dir]);

            lc[dir] = hc[dir] = (1 - lr) * (cell_grid[dir] + 1);

            fill_comm_cell_lists(
                ghost_comm.communications[cnt].part_lists.data(), lc, hc);
            cnt++;
          }
        }
      }
      done[dir] = 1;
    }
  }

  return ghost_comm;
}
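
/* The per-direction send counts above grow as the ghost frame is filled in:
   after x has been exchanged, the y slabs already include the two x ghost
   layers, and the z slabs include both. Standalone check for a made-up
   4x4x4 local cell grid: */
#include <cassert>

int main() {
  int const cell_grid[3] = {4, 4, 4};
  int const n_x = cell_grid[1] * cell_grid[2];             // 16 cells
  int const n_y = cell_grid[2] * (cell_grid[0] + 2);       // 24 cells
  int const n_z = (cell_grid[0] + 2) * (cell_grid[1] + 2); // 36 cells
  assert(n_x == 16 and n_y == 24 and n_z == 36);
}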

RegularDecomposition::RegularDecomposition(
    boost::mpi::communicator comm, double range, BoxGeometry const &box_geo,
    LocalBox const &local_geo,
    std::optional<std::pair<int, int>> fully_connected)
    : m_comm(std::move(comm)), m_box(box_geo), m_local_box(local_geo),
      m_fully_connected_boundary(std::move(fully_connected)) {

  /* set up new regular decomposition cell structure */
  create_cell_grid(range);

  /* setup cell neighbors */
  init_cell_interactions();

  /* mark local and ghost cells */
  mark_cells();

  /* create communicators */
  m_exchange_ghosts_comm = prepare_comm();
  m_collect_ghost_force_comm = prepare_comm();

  /* collect forces has to be done in reverted order! */
  revert_comm_order(m_collect_ghost_force_comm);

  assign_prefetches(m_exchange_ghosts_comm);
  assign_prefetches(m_collect_ghost_force_comm);
}