ESPResSo
Extensible Simulation Package for Research on Soft Matter Systems
ReactionKernelIndexed_1_double_precision_CUDA.h
/*
 * Copyright (C) 2022-2025 The ESPResSo project
 * Copyright (C) 2020-2025 The waLBerla project
 *
 * This file is part of ESPResSo.
 *
 * ESPResSo is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * ESPResSo is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

// kernel generated with pystencils v1.4+1.ge851f4e, lbmpy v1.4+1.ge9efe34,
// sympy v1.12.1, lbmpy_walberla/pystencils_walberla from waLBerla commit
// 007e77e077ad9d22b5eed6f3d3118240993e553c

/*
 * Boundary class.
 * Adapted from the waLBerla source file
 * https://i10git.cs.fau.de/walberla/walberla/-/blob/3e54d4f2336e47168ad87e3caaf7b3b082d86ca7/python/pystencils_walberla/templates/Boundary.tmpl.h
 */

#pragma once

#include <core/DataTypes.h>

#include <blockforest/StructuredBlockForest.h>
#include <core/debug/Debug.h>
#include <domain_decomposition/BlockDataID.h>
#include <domain_decomposition/IBlock.h>
#include <field/FlagField.h>
#include <gpu/FieldCopy.h>
#include <gpu/GPUField.h>
#include <gpu/GPUWrapper.h>

#include <array>
#include <cassert>
#include <functional>
#include <memory>
#include <vector>

#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-variable"
#pragma clang diagnostic ignored "-Wunused-parameter"
#elif defined(__GNUC__) or defined(__GNUG__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif

#ifdef __GNUC__
#define RESTRICT __restrict__
#elif _MSC_VER
#define RESTRICT __restrict
#else
#define RESTRICT
#endif

#ifdef WALBERLA_BUILD_WITH_HALF_PRECISION_SUPPORT
using walberla::half;
#endif

namespace walberla {
namespace pystencils {

class ReactionKernelIndexed_1_double_precision_CUDA {
public:
  struct IndexInfo {
    int32_t x;
    int32_t y;
    int32_t z;
    IndexInfo(int32_t x_, int32_t y_, int32_t z_) : x(x_), y(y_), z(z_) {}
    bool operator==(const IndexInfo &o) const {
      return x == o.x && y == o.y && z == o.z;
    }
  };

  class IndexVectors {
  public:
    using CpuIndexVector = std::vector<IndexInfo>;

    enum Type { ALL = 0, INNER = 1, OUTER = 2, NUM_TYPES = 3 };

    IndexVectors() = default;
    bool operator==(IndexVectors const &other) const {
      return other.cpuVectors_ == cpuVectors_;
    }

    ~IndexVectors() {
      for (auto &gpuVec : gpuVectors_) {
        if (gpuVec) {
          WALBERLA_GPU_CHECK(gpuFree(gpuVec));
        }
      }
    }
    auto &indexVector(Type t) { return cpuVectors_[t]; }
    auto const &indexVector(Type t) const { return cpuVectors_[t]; }
    IndexInfo *pointerCpu(Type t) {
      return cpuVectors_[t].empty() ? nullptr : cpuVectors_[t].data();
    }

    IndexInfo *pointerGpu(Type t) { return gpuVectors_[t]; }
    void syncGPU() {
      for (auto &gpuVec : gpuVectors_) {
        if (gpuVec) {
          WALBERLA_GPU_CHECK(gpuFree(gpuVec));
          gpuVec = nullptr;
        }
      }
      gpuVectors_.resize(cpuVectors_.size());

      WALBERLA_ASSERT_EQUAL(cpuVectors_.size(), NUM_TYPES);
      for (size_t i = 0; i < cpuVectors_.size(); ++i) {
        auto &gpuVec = gpuVectors_[i];
        auto &cpuVec = cpuVectors_[i];
        if (cpuVec.empty()) {
          continue;
        }
        WALBERLA_GPU_CHECK(
            gpuMalloc(&gpuVec, sizeof(IndexInfo) * cpuVec.size()));
        WALBERLA_GPU_CHECK(gpuMemcpy(gpuVec, cpuVec.data(),
                                     sizeof(IndexInfo) * cpuVec.size(),
                                     gpuMemcpyHostToDevice));
      }
    }

  private:
    std::vector<CpuIndexVector> cpuVectors_{NUM_TYPES};

    using GpuIndexVector = IndexInfo *;
    std::vector<GpuIndexVector> gpuVectors_;
  };
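
  // Each host-side CpuIndexVector is mirrored by a raw device pointer in
  // gpuVectors_. syncGPU() rebuilds the device mirrors from scratch (freeing
  // stale buffers, then one gpuMalloc/gpuMemcpy per non-empty host vector),
  // so it must be called after every change to the index lists, as
  // fillFromFlagField() does below.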

  ReactionKernelIndexed_1_double_precision_CUDA(
      const std::shared_ptr<StructuredBlockForest> &blocks,
      BlockDataID rho_0ID_, double order_0, double rate_coefficient,
      double stoech_0)
      : rho_0ID(rho_0ID_), order_0_(order_0),
        rate_coefficient_(rate_coefficient), stoech_0_(stoech_0) {
    auto createIdxVector = [](IBlock *const, StructuredBlockStorage *const) {
      return new IndexVectors();
    };
    indexVectorID = blocks->addStructuredBlockData<IndexVectors>(
        createIdxVector,
        "IndexField_ReactionKernelIndexed_1_double_precision_CUDA");
  }

  ReactionKernelIndexed_1_double_precision_CUDA(BlockDataID indexVectorID_,
                                                BlockDataID rho_0ID_,
                                                double order_0,
                                                double rate_coefficient,
                                                double stoech_0)
      : indexVectorID(indexVectorID_), rho_0ID(rho_0ID_), order_0_(order_0),
        rate_coefficient_(rate_coefficient), stoech_0_(stoech_0) {}

  void run(IBlock *block, gpuStream_t stream = nullptr);

  void operator()(IBlock *block, gpuStream_t stream = nullptr) {
    run(block, stream);
  }

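  // inner() processes only the INNER index list (cells at least one cell
  // away from the block edge) and outer() the OUTER remainder, so callers
  // can run inner() while ghost-layer communication is in flight and
  // outer() once it has finished; run() processes the full ALL list.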
  void inner(IBlock *block, gpuStream_t stream = nullptr);

  void outer(IBlock *block, gpuStream_t stream = nullptr);

  Vector3<double> getForce(IBlock * /*block*/) {

    WALBERLA_ABORT(
        "Boundary condition was not generated including force calculation.")
    return Vector3<double>(double_c(0.0));
  }

  std::function<void(IBlock *)> getSweep(gpuStream_t stream = nullptr) {
    return [this, stream](IBlock *b) { this->run(b, stream); };
  }

  std::function<void(IBlock *)> getInnerSweep(gpuStream_t stream = nullptr) {
    return [this, stream](IBlock *b) { this->inner(b, stream); };
  }

  std::function<void(IBlock *)> getOuterSweep(gpuStream_t stream = nullptr) {
    return [this, stream](IBlock *b) { this->outer(b, stream); };
  }

  template <typename FlagField_T>
  void fillFromFlagField(const std::shared_ptr<StructuredBlockForest> &blocks,
                         ConstBlockDataID flagFieldID, FlagUID boundaryFlagUID,
                         FlagUID domainFlagUID) {
    for (auto &block : *blocks)
      fillFromFlagField<FlagField_T>(&block, flagFieldID, boundaryFlagUID,
                                     domainFlagUID);
  }

  template <typename FlagField_T>
  void fillFromFlagField(IBlock *block, ConstBlockDataID flagFieldID,
                         FlagUID boundaryFlagUID, FlagUID domainFlagUID) {
    auto *indexVectors = block->getData<IndexVectors>(indexVectorID);
    auto &indexVectorAll = indexVectors->indexVector(IndexVectors::ALL);
    auto &indexVectorInner = indexVectors->indexVector(IndexVectors::INNER);
    auto &indexVectorOuter = indexVectors->indexVector(IndexVectors::OUTER);

    auto *flagField = block->getData<FlagField_T>(flagFieldID);

    if (!(flagField->flagExists(boundaryFlagUID) and
          flagField->flagExists(domainFlagUID)))
      return;

    auto boundaryFlag = flagField->getFlag(boundaryFlagUID);
    auto domainFlag = flagField->getFlag(domainFlagUID);

    auto inner = flagField->xyzSize();
    inner.expand(cell_idx_t(-1));

    indexVectorAll.clear();
    indexVectorInner.clear();
    indexVectorOuter.clear();

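    // Scan every cell, ghost layers included. The zero-offset "neighbor"
    // check below tests the cell itself, so a cell is recorded only if it
    // carries both the boundary and the domain flag: it always goes into
    // ALL, plus INNER when at least one cell inside the block edge, OUTER
    // otherwise.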
    auto flagWithGLayers = flagField->xyzSizeWithGhostLayer();
    for (auto it = flagField->beginWithGhostLayerXYZ(); it != flagField->end();
         ++it) {

      if (!isFlagSet(it, boundaryFlag))
        continue;
      if (flagWithGLayers.contains(it.x() + cell_idx_c(0),
                                   it.y() + cell_idx_c(0),
                                   it.z() + cell_idx_c(0)) &&
          isFlagSet(it.neighbor(0, 0, 0, 0), domainFlag)) {

        auto element = IndexInfo(it.x(), it.y(), it.z());

        indexVectorAll.emplace_back(element);
        if (inner.contains(it.x(), it.y(), it.z()))
          indexVectorInner.emplace_back(element);
        else
          indexVectorOuter.emplace_back(element);
      }
    }

    indexVectors->syncGPU();
  }

private:
  void run_impl(IBlock *block, IndexVectors::Type type,
                gpuStream_t stream = nullptr);

  BlockDataID indexVectorID;

public:
  BlockDataID rho_0ID;
  double order_0_;
  double rate_coefficient_;
  double stoech_0_;
};

#if defined(__clang__)
#pragma clang diagnostic pop
#elif defined(__GNUC__) or defined(__GNUG__)
#pragma GCC diagnostic pop
#endif

} // namespace pystencils
} // namespace walberla