ESPResSo — Extensible Simulation Package for Research on Soft Matter Systems.
Documentation page for the source file EKReactionImplIndexed.hpp.
1/*
2 * Copyright (C) 2022-2026 The ESPResSo project
3 *
4 * This file is part of ESPResSo.
5 *
6 * ESPResSo is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation, either version 3 of the License, or
9 * (at your option) any later version.
10 *
11 * ESPResSo is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#pragma once
21
23
29
30#include <blockforest/StructuredBlockForest.h>
31#include <domain_decomposition/BlockDataID.h>
32#include <domain_decomposition/IBlock.h>
33#include <field/AddToStorage.h>
34#include <field/FlagField.h>
35#include <field/FlagUID.h>
36#include <waLBerlaDefinitions.h>
37
38#include <utils/Vector.hpp>
39
#include <cassert>
#include <cstddef>
#include <functional>
#include <memory>
#include <optional>
#include <tuple>
#include <type_traits>
#include <vector>
46
47namespace walberla {
48template <lbmpy::Arch Architecture = lbmpy::Arch::CPU>
50private:
51 BlockDataID m_flagfield_id;
52 BlockDataID m_indexvector_id;
53 bool m_pending_changes;
54
55public:
56 /** Flag for domain cells, i.e. all cells. */
57 FlagUID const Domain_flag{"domain"};
58 /** Flag for boundary cells. */
59 FlagUID const Boundary_flag{"boundary"};
60
61 using FlagField = field::FlagField<uint8_t>;
62#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
63 using IndexVectors =
64 std::conditional<Architecture == lbmpy::Arch::CPU,
65 detail::ReactionKernelIndexedSelector::KernelTrait<>::
66 ReactionKernelIndexed::IndexVectors,
67 detail::ReactionKernelIndexedSelector::KernelTraitGPU<>::
68 ReactionKernelIndexedGPU::IndexVectors>::type;
69 using IndexInfo =
70 std::conditional<Architecture == lbmpy::Arch::CPU,
71 detail::ReactionKernelIndexedSelector::KernelTrait<>::
72 ReactionKernelIndexed::IndexInfo,
73 detail::ReactionKernelIndexedSelector::KernelTraitGPU<>::
74 ReactionKernelIndexedGPU::IndexInfo>::type;
75#else
76 using IndexVectors = detail::ReactionKernelIndexedSelector::KernelTrait<>::
77 ReactionKernelIndexed::IndexVectors;
78 using IndexInfo = detail::ReactionKernelIndexedSelector::KernelTrait<>::
79 ReactionKernelIndexed::IndexInfo;
80#endif
81
82private:
83 auto get_flag_field_and_flag(IBlock *block, BlockDataID const &flagfield_id) {
84 auto const flag_field =
86 auto const boundary_flag = flag_field->getFlag(Boundary_flag);
87 return std::make_tuple(flag_field, boundary_flag);
88 }
89
90public:
91 EKReactionImplIndexed(std::shared_ptr<LatticeWalberla> const &lattice,
94 m_pending_changes(false) {
95 m_flagfield_id = field::addFlagFieldToStorage<FlagField>(
96 get_lattice()->get_blocks(), "flag field reaction",
97 get_lattice()->get_ghost_layers());
98
99 auto createIdxVector = [](IBlock *const, StructuredBlockStorage *const) {
100 return new IndexVectors();
101 };
102 m_indexvector_id = get_lattice()
103 ->get_blocks()
105 createIdxVector, "IndexField");
106
107 for (auto &block : *get_lattice()->get_blocks()) {
108 auto flag_field = block.template getData<FlagField>(m_flagfield_id);
109 // register flags
110 flag_field->registerFlag(Domain_flag);
111 flag_field->registerFlag(Boundary_flag);
112 // mark all cells as domain cells and fluid cells
113 auto domain_flag = flag_field->getFlag(Domain_flag);
114 auto boundary_flag = flag_field->getFlag(Boundary_flag);
115 for (auto it = flag_field->begin(); it != flag_field->end(); ++it) {
116 flag_field->addFlag(it.x(), it.y(), it.z(), domain_flag);
117 flag_field->removeFlag(it.x(), it.y(), it.z(), boundary_flag);
118 }
119 }
120 }
121 ~EKReactionImplIndexed() override = default;
122
126
127 void perform_reaction() override {
129 std::function<void(IBlock *)> kernel;
131 kernel = detail::ReactionKernelIndexedSelector::get_kernel(
132 get_reactants(), get_coefficient(), m_indexvector_id);
133 } else {
134#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
135 kernel = detail::ReactionKernelIndexedSelector::get_kernel_gpu(
136 get_reactants(), get_coefficient(), m_indexvector_id);
137#endif
138 }
139 for (auto &block : *get_lattice()->get_blocks()) {
140 kernel(&block);
141 }
142 }
143
145 bool is_boundary) override {
146 if (auto bc = get_block_and_cell(*get_lattice(), node, true)) {
147 auto const [flag_field, boundary_flag] =
148 get_flag_field_and_flag(bc->block, m_flagfield_id);
149 if (is_boundary) {
150 flag_field->addFlag(bc->cell, boundary_flag);
151 } else {
152 flag_field->removeFlag(bc->cell, boundary_flag);
153 }
154 m_pending_changes = true;
155 }
156 }
157
158 [[nodiscard]] std::optional<bool>
159 get_node_is_boundary(Utils::Vector3i const &node) override {
160 if (auto bc = get_block_and_cell(*get_lattice(), node, true)) {
161 auto const [flag_field, boundary_flag] =
162 get_flag_field_and_flag(bc->block, m_flagfield_id);
163 return {flag_field->isFlagSet(bc->cell, boundary_flag)};
164 }
165 return std::nullopt;
166 }
167
169 if (m_pending_changes) {
170 for (auto &block : *get_lattice()->get_blocks()) {
171 fillFromFlagField(block);
172 }
173 m_pending_changes = false;
174 }
175 }
176
177private:
178 void fillFromFlagField(IBlock &block) {
179 auto *indexVectors =
180 block.uncheckedFastGetData<IndexVectors>(m_indexvector_id);
181 auto &indexVectorAll = indexVectors->indexVector(IndexVectors::ALL);
184
185 auto *flagField = block.getData<FlagField>(m_flagfield_id);
186
187 assert(flagField->flagExists(Boundary_flag) and
188 flagField->flagExists(Domain_flag));
189
190 auto boundaryFlag = flagField->getFlag(Boundary_flag);
191 auto domainFlag = flagField->getFlag(Domain_flag);
192
193 auto inner = flagField->xyzSize();
194 inner.expand(cell_idx_t(-1));
195
196 indexVectorAll.clear();
197 indexVectorInner.clear();
198 indexVectorOuter.clear();
199
200 auto flagWithGLayers = flagField->xyzSizeWithGhostLayer();
201 for (auto it = flagField->beginWithGhostLayerXYZ(); it != flagField->end();
202 ++it) {
204 continue;
205
206 if (flagWithGLayers.contains(it.x(), it.y(), it.z()) &&
207 isFlagSet(it.neighbor(0, 0, 0, 0), domainFlag)) {
208 auto element = IndexInfo(it.x(), it.y(), it.z());
209 indexVectorAll.push_back(element);
210 if (inner.contains(it.x(), it.y(), it.z())) {
211 indexVectorInner.push_back(element);
212 } else {
213 indexVectorOuter.push_back(element);
214 }
215 }
216 }
217
218 indexVectors->syncGPU();
219 }
220};
221
222} // namespace walberla
Vector implementation and trait types for boost qvm interoperability.
std::vector< std::shared_ptr< EKReactant > > reactants_type
auto get_lattice() const noexcept
double get_coefficient() const noexcept
auto const & get_reactants() const noexcept
std::optional< bool > get_node_is_boundary(Utils::Vector3i const &node) override
FlagUID const Boundary_flag
Flag for boundary cells.
FlagUID const Domain_flag
Flag for domain cells, i.e.
field::FlagField< uint8_t > FlagField
detail::ReactionKernelIndexedSelector::KernelTrait<>::ReactionKernelIndexed::IndexInfo IndexInfo
~EKReactionImplIndexed() override=default
detail::ReactionKernelIndexedSelector::KernelTrait<>::ReactionKernelIndexed::IndexVectors IndexVectors
EKReactionImplIndexed(std::shared_ptr< LatticeWalberla > const &lattice, reactants_type const &reactants, double coefficient)
void set_node_is_boundary(Utils::Vector3i const &node, bool is_boundary) override
cudaStream_t stream[1]
CUDA streams for parallel computing on CPU and GPU.
static double * block(double *p, std::size_t index, std::size_t size)
Definition elc.cpp:177
\file PackInfoPdfDoublePrecision.cpp \author pystencils
std::optional< BlockAndCell > get_block_and_cell(::LatticeWalberla const &lattice, signed_integral_vector auto const &node, bool consider_ghost_layers)