ESPResSo
Extensible Simulation Package for Research on Soft Matter Systems
Loading...
Searching...
No Matches
gather_buffer.hpp
Go to the documentation of this file.
1/*
2 * Copyright (C) 2017-2026 The ESPResSo project
3 * Max-Planck-Institute for Polymer Research, Theory Group
4 *
5 * This file is part of ESPResSo.
6 *
7 * ESPResSo is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 3 of the License, or
10 * (at your option) any later version.
11 *
12 * ESPResSo is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
#pragma once

#include "gatherv.hpp"

#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>

#include <algorithm>
#include <cstddef>
#include <type_traits>
#include <vector>
32
33namespace Utils {
34namespace Mpi {
35
36/**
37 * @brief Gather buffer with different size on each node.
38 *
39 * Gathers buffers with different lengths from all nodes to root.
40 * The buffer is resized to the total size. On the @p root node,
41 * the first @p n_elem elements of @p buffer are moved, if need
42 * be. On the other nodes, @p buffer is not touched.
43 *
44 * This encapsulates a common combination of <tt>MPI_Gather()</tt>
45 * and <tt>MPI_{Send,Recv}()</tt>.
46 *
47 * @param buffer On the head node: the target buffer that has the local
48 * part in the beginning. On worker nodes: the local buffer.
49 * @param comm The MPI communicator.
50 * @param root The rank where the data should be gathered.
51 */
52template <typename T, class Allocator>
53void gather_buffer(std::vector<T, Allocator> &buffer,
54 boost::mpi::communicator const &comm, int root = 0) {
55 auto const n_elem = static_cast<int>(buffer.size());
56
57 if (comm.rank() == root) {
58 std::vector<int> sizes;
59 std::vector<int> displ;
60
61 auto const tot_size =
62 detail::size_and_offset<T>(sizes, displ, n_elem, comm, root);
63
64 /* Resize the buffer */
65 buffer.resize(static_cast<unsigned int>(tot_size));
66
67 /* Move the original data to its new location */
68 if (sizes[root] && displ[root]) {
69 for (int i = sizes[root] - 1; i >= 0; --i) {
70 buffer[i + displ[root]] = buffer[i];
71 }
72 }
73
74 /* Gather data */
75 gatherv(comm, buffer.data(), tot_size, buffer.data(), sizes.data(),
76 displ.data(), root);
77 } else {
78 /* Send local size */
79 detail::size_and_offset(n_elem, comm, root);
80 /* Send data */
81 gatherv(comm, buffer.data(), n_elem, static_cast<T *>(nullptr), nullptr,
82 nullptr, root);
83 }
84}
85} // namespace Mpi
86} // namespace Utils
cudaStream_t stream[1]
CUDA streams for parallel computing on CPU and GPU.
void gather_buffer(std::vector< T, Allocator > &buffer, boost::mpi::communicator const &comm, int root=0)
Gather buffer with different size on each node.
void gatherv(const boost::mpi::communicator &comm, const T *in_values, int in_size, T *out_values, const int *sizes, const int *displs, int root)
Definition gatherv.hpp:87