ESPResSo
Extensible Simulation Package for Research on Soft Matter Systems
Loading...
Searching...
No Matches
EKinWalberlaImpl.hpp
Go to the documentation of this file.
1/*
2 * Copyright (C) 2022-2026 The ESPResSo project
3 *
4 * This file is part of ESPResSo.
5 *
6 * ESPResSo is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation, either version 3 of the License, or
9 * (at your option) any later version.
10 *
11 * ESPResSo is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#pragma once
21
22#include <blockforest/communication/UniformBufferedScheme.h>
23#include <field/AddToStorage.h>
24#include <field/FlagField.h>
25#include <field/FlagUID.h>
26#include <field/GhostLayerField.h>
27#include <field/communication/PackInfo.h>
28#include <field/vtk/FlagFieldCellFilter.h>
29#include <field/vtk/VTKWriter.h>
30#include <stencil/D3Q27.h>
31#include <waLBerlaDefinitions.h>
32#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
33#include <gpu/AddGPUFieldToStorage.h>
34#include <gpu/HostFieldAllocator.h>
35#include <gpu/communication/MemcpyPackInfo.h>
36#include <gpu/communication/UniformGPUScheme.h>
37#endif
38
39#include "../BoundaryHandling.hpp"
40#include "../BoundaryPackInfo.hpp"
41#include "../utils/boundary.hpp"
42#include "../utils/types_conversion.hpp"
43#include "ek_kernels.hpp"
44#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
45#include "ek_kernels.cuh"
46#endif
47
54
55#include <utils/Vector.hpp>
56
57#include <cstddef>
58#include <iterator>
59#include <memory>
60#include <optional>
61#include <stdexcept>
62#include <string>
63#include <type_traits>
64#include <variant>
65#include <vector>
66
67namespace walberla {
68
69/** @brief Class that runs and controls the EK on waLBerla. */
70template <std::size_t FluxCount = 13, typename FloatType = double,
73#if not defined(WALBERLA_BUILD_WITH_CUDA)
74 static_assert(Architecture != lbmpy::Arch::GPU,
75 "waLBerla was compiled without CUDA support");
76#endif
77 using ContinuityKernel =
79 using DiffusiveFluxKernelUnthermalized =
80 typename detail::KernelTrait<FloatType,
81 Architecture>::DiffusiveFluxKernel;
82 using DiffusiveFluxKernelThermalized = typename detail::KernelTrait<
83 FloatType, Architecture>::DiffusiveFluxKernelThermalized;
84 using AdvectiveFluxKernel =
85 typename detail::KernelTrait<FloatType,
86 Architecture>::AdvectiveFluxKernel;
87 using FrictionCouplingKernel =
88 typename detail::KernelTrait<FloatType,
89 Architecture>::FrictionCouplingKernel;
90 using DiffusiveFluxKernelElectrostaticUnthermalized =
91 typename detail::KernelTrait<
92 FloatType, Architecture>::DiffusiveFluxKernelElectrostatic;
93 using DiffusiveFluxKernelElectrostaticThermalized =
94 typename detail::KernelTrait<
95 FloatType, Architecture>::DiffusiveFluxKernelElectrostaticThermalized;
96
97 using DiffusiveFluxKernel = std::variant<DiffusiveFluxKernelUnthermalized,
98 DiffusiveFluxKernelThermalized>;
99 using DiffusiveFluxKernelElectrostatic =
100 std::variant<DiffusiveFluxKernelElectrostaticUnthermalized,
101 DiffusiveFluxKernelElectrostaticThermalized>;
102
103 using Dirichlet =
105 using FixedFlux =
107
110 using BoundaryModelFlux =
112
113public:
114 /** @brief Stencil for collision and streaming operations. */
115 using Stencil = stencil::D3Q27;
116 /** @brief Lattice model (e.g. blockforest). */
118
119protected:
120 template <typename FT, lbmpy::Arch AT = lbmpy::Arch::CPU> struct FieldTrait {
121 // Type definitions
124 template <class Field>
125 using PackInfo = field::communication::PackInfo<Field>;
126 template <class Stencil>
128 blockforest::communication::UniformBufferedScheme<Stencil>;
129 template <class Stencil>
131 blockforest::communication::UniformBufferedScheme<Stencil>;
132 };
133 using FlagField = walberla::FlagField<walberla::uint8_t>;
134#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
135 template <typename FT> struct FieldTrait<FT, lbmpy::Arch::GPU> {
136 private:
137 static auto constexpr AT = lbmpy::Arch::GPU;
138 template <class Field>
139 using MemcpyPackInfo = gpu::communication::MemcpyPackInfo<Field>;
140
141 public:
    /**
     * @brief Ghost-layer exchange scheme for GPU fields.
     *
     * Copies are staged through host memory (@c sendDirectlyFromGPU is
     * @c false) and local communication is disabled.
     */
    template <typename Stencil>
    class UniformGPUScheme
        : public gpu::communication::UniformGPUScheme<Stencil> {
    public:
      explicit UniformGPUScheme(auto const &bf)
          : gpu::communication::UniformGPUScheme<Stencil>(
                bf, /* sendDirectlyFromGPU */ false,
                /* useLocalCommunication */ false) {}
    };
151 using FluxField = gpu::GPUField<FT>;
152 using DensityField = gpu::GPUField<FT>;
153 template <class Field> using PackInfo = MemcpyPackInfo<Field>;
154 template <class Stencil>
156 template <class Stencil>
157 using BoundaryCommScheme =
158 blockforest::communication::UniformBufferedScheme<Stencil>;
159 };
160 using GPUField = gpu::GPUField<FloatType>;
161#endif
162
  /** @brief Tags for pending ghost-layer communication operations. */
  struct GhostComm {
    /** @brief Ghost communication operations. */
    enum GhostCommFlags : unsigned {
      FLB, ///< flux boundary communication
      DENS, ///< density communication
      SIZE ///< number of tags; sizes the pending-communication bitset
    };
  };
171
172 // "underlying" field types (`GPUField` has no f-size info at compile time)
175
176public:
180
181#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
184#endif
185
186 template <typename T> FloatType FloatType_c(T t) {
187 return numeric_cast<FloatType>(t);
188 }
189
  /** @brief Get the number of flux directions in the stencil. */
  [[nodiscard]] std::size_t stencil_size() const noexcept override {
    return FluxCount;
  }
193
195 return std::is_same_v<FloatType, double>;
196 }
197
198private:
199 FloatType m_diffusion;
200 FloatType m_kT;
201 FloatType m_valency;
202 Utils::Vector3d m_ext_efield;
203 bool m_advection;
204 bool m_friction_coupling;
205 unsigned int m_seed;
206
207protected:
208 // Block data access handles
210
212
215
216#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
217 std::optional<BlockDataID> m_density_cpu_field_id;
218 std::optional<BlockDataID> m_flux_cpu_field_id;
219#endif
220
221 /** Flag for domain cells, i.e. all cells. */
222 FlagUID const Domain_flag{"domain"};
223 /** Flag for boundary cells. */
224 FlagUID const Boundary_flag{"boundary"};
225
226 /** Block forest */
227 std::shared_ptr<LatticeWalberla> m_lattice;
228
229 std::unique_ptr<BoundaryModelDensity> m_boundary_density;
230 std::shared_ptr<BoundaryModelFlux> m_boundary_flux;
231
232 std::unique_ptr<DiffusiveFluxKernel> m_diffusive_flux;
233 std::unique_ptr<DiffusiveFluxKernelElectrostatic>
235 std::unique_ptr<ContinuityKernel> m_continuity;
236
237#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
238 std::shared_ptr<gpu::HostFieldAllocator<FloatType>> m_host_field_allocator;
239#endif
240
241 // ResetFlux + external force
242 // TODO: kernel for that
243 // std::shared_ptr<ResetForce<PdfField, VectorField>> m_reset_force;
244
245 /**
246 * @brief Convenience function to add a field with a custom allocator.
247 *
248 * When vectorization is off, let waLBerla decide which memory allocator
249 * to use. When vectorization is on, the aligned memory allocator is
250 * required, otherwise <tt>cpu_vectorize_info["assume_aligned"]</tt> will
251 * trigger assertions. That is because for single-precision kernels the
252 * waLBerla heuristic in <tt>src/field/allocation/FieldAllocator.h</tt>
253 * will fall back to @c StdFieldAlloc, yet @c AllocateAligned is needed
254 * for intrinsics to work.
255 */
256 template <typename Field>
257 auto add_to_storage(std::string const tag, FloatType value) {
258 auto const &blocks = m_lattice->get_blocks();
259 auto const n_ghost_layers = m_lattice->get_ghost_layers();
260 if constexpr (Architecture == lbmpy::Arch::CPU) {
261 return field::addToStorage<Field>(blocks, tag, FloatType{value},
262 field::fzyx, n_ghost_layers);
263 }
264#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
265 else {
266 auto field_id = gpu::addGPUFieldToStorage<GPUField>(
267 blocks, tag, Field::F_SIZE, field::fzyx, n_ghost_layers);
268 if constexpr (std::is_same_v<Field, _DensityField>) {
269 for (auto block = blocks->begin(); block != blocks->end(); ++block) {
270 auto field = block->template getData<GPUField>(field_id);
271 ek::accessor::Scalar::initialize(field, FloatType{value});
272 }
273 } else if constexpr (std::is_same_v<Field, _FluxField>) {
274 for (auto block = blocks->begin(); block != blocks->end(); ++block) {
275 auto field = block->template getData<GPUField>(field_id);
277 std::array<FloatType, FluxCount>{});
278 }
279 }
280 return field_id;
281 }
282#endif
283 }
284
285 void
286 reset_density_boundary_handling(std::shared_ptr<BlockStorage> const &blocks) {
287 auto const [lc, uc] = m_lattice->get_local_grid_range(true);
288 m_boundary_density = std::make_unique<BoundaryModelDensity>(
290 CellInterval{to_cell(lc), to_cell(uc)});
291 }
292
293 void
294 reset_flux_boundary_handling(std::shared_ptr<BlockStorage> const &blocks) {
295 auto const [lc, uc] = m_lattice->get_local_grid_range(true);
296 m_boundary_flux = std::make_shared<BoundaryModelFlux>(
298 CellInterval{to_cell(lc), to_cell(uc)});
299 }
300
302 typename FieldTrait<FloatType, Architecture>::template RegularCommScheme<
303 typename stencil::D3Q27>;
305 typename FieldTrait<FloatType, Architecture>::template BoundaryCommScheme<
306 typename stencil::D3Q27>;
307 std::shared_ptr<FullCommunicator> m_full_communication;
308 std::shared_ptr<BoundaryFullCommunicator> m_boundary_communicator;
309 std::bitset<GhostComm::SIZE> m_pending_ghost_comm;
311 template <class Field>
312 using PackInfo =
314
315public:
316 EKinWalberlaImpl(std::shared_ptr<LatticeWalberla> lattice, double diffusion,
317 double kT, double valency, Utils::Vector3d const &ext_efield,
318 double density, bool advection, bool friction_coupling,
319 bool thermalized, unsigned int seed)
320 : m_diffusion(FloatType_c(diffusion)), m_kT(FloatType_c(kT)),
321 m_valency(FloatType_c(valency)), m_ext_efield(ext_efield),
322 m_advection(advection), m_friction_coupling(friction_coupling),
323 m_seed(seed), m_lattice(std::move(lattice)),
325
326 auto const &blocks = m_lattice->get_blocks();
327 auto const n_ghost_layers = m_lattice->get_ghost_layers();
328
332 add_to_storage<_FluxField>("flux field", FloatType_c(0.0));
333
335 std::make_unique<ContinuityKernel>(m_flux_field_id, m_density_field_id);
336
337#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
339 std::make_shared<gpu::HostFieldAllocator<FloatType>>();
340#endif
341
342 if (thermalized) {
343 set_diffusion_kernels(*m_lattice, seed);
344 } else {
345 set_diffusion_kernels();
346 }
347
348 // Init boundary related stuff
349 m_flag_field_density_id = field::addFlagFieldToStorage<FlagField>(
350 blocks, "flag field density", n_ghost_layers);
352
353 m_flag_field_flux_id = field::addFlagFieldToStorage<FlagField>(
354 blocks, "flag field flux", n_ghost_layers);
356
357 m_full_communication = std::make_shared<FullCommunicator>(blocks);
358 m_full_communication->addPackInfo(
359 std::make_shared<PackInfo<DensityField>>(m_density_field_id));
361 std::make_shared<BoundaryFullCommunicator>(blocks);
362 m_boundary_communicator->addPackInfo(
365 auto flux_boundary_packinfo = std::make_shared<
370
372 }
373
374 // Global parameters
  /** @brief Get the diffusion coefficient. */
  [[nodiscard]] double get_diffusion() const noexcept override {
    return m_diffusion;
  }
  /** @brief Get the thermal energy. */
  [[nodiscard]] double get_kT() const noexcept override { return m_kT; }
  /** @brief Get the species valency. */
  [[nodiscard]] double get_valency() const noexcept override {
    return m_valency;
  }
  /** @brief Check whether advection of the species is enabled. */
  [[nodiscard]] bool get_advection() const noexcept override {
    return m_advection;
  }
386 return m_friction_coupling;
387 }
389 return m_ext_efield;
390 }
392 return static_cast<bool>(
393 std::get_if<DiffusiveFluxKernelThermalized>(&*m_diffusive_flux));
394 }
  /** @brief Get the RNG seed. */
  [[nodiscard]] unsigned int get_seed() const noexcept override {
    return m_seed;
  }
398 [[nodiscard]] std::optional<uint64_t> get_rng_state() const override {
399 auto const kernel =
400 std::get_if<DiffusiveFluxKernelThermalized>(&*m_diffusive_flux);
401 if (!kernel) {
402 return std::nullopt;
403 }
404 return {static_cast<uint64_t>(kernel->getTime_step())};
405 }
406
407 void set_diffusion(double diffusion) override {
408 m_diffusion = FloatType_c(diffusion);
409 auto visitor = [m_diffusion = m_diffusion](auto &kernel) {
410 kernel.setD(m_diffusion);
411 };
412 std::visit(visitor, *m_diffusive_flux);
414 }
415
416 void set_kT(double kT) override {
417 m_kT = FloatType_c(kT);
418 std::visit([m_kT = m_kT](auto &kernel) { kernel.setKt(m_kT); },
420 }
421
422 void set_valency(double valency) override {
423 m_valency = FloatType_c(valency);
424 std::visit(
425 [m_valency = m_valency](auto &kernel) { kernel.setZ(m_valency); },
427 }
428
429 void set_advection(bool advection) override { m_advection = advection; }
430
432 m_friction_coupling = friction_coupling;
433 }
434
435 void set_rng_state(uint64_t counter) override {
436 auto const kernel =
437 std::get_if<DiffusiveFluxKernelThermalized>(&*m_diffusive_flux);
438 auto const kernel_electrostatic =
439 std::get_if<DiffusiveFluxKernelElectrostaticThermalized>(
441
442 if (!kernel or !kernel_electrostatic) {
443 throw std::runtime_error("This EK instance is unthermalized");
444 }
445 assert(counter <=
446 static_cast<uint32_t>(std::numeric_limits<uint_t>::max()));
447 kernel->setTime_step(static_cast<uint32_t>(counter));
448 kernel_electrostatic->setTime_step(static_cast<uint32_t>(counter));
449 }
450
451 void set_ext_efield(Utils::Vector3d const &field) override {
452 m_ext_efield = field;
453
454 std::visit(
455 [this](auto &kernel) {
456 kernel.setF_ext_0(FloatType_c(m_ext_efield[0]));
457 kernel.setF_ext_1(FloatType_c(m_ext_efield[1]));
458 kernel.setF_ext_2(FloatType_c(m_ext_efield[2]));
459 },
461 }
462
463 void ghost_communication() override {
466 (*m_full_communication)();
468 }
470 }
471
479
480private:
481 void set_diffusion_kernels() {
482 auto kernel = DiffusiveFluxKernelUnthermalized(
484 m_diffusive_flux = std::make_unique<DiffusiveFluxKernel>(std::move(kernel));
485
486 auto kernel_electrostatic = DiffusiveFluxKernelElectrostaticUnthermalized(
488 FloatType_c(m_diffusion), FloatType_c(m_ext_efield[0]),
489 FloatType_c(m_ext_efield[1]), FloatType_c(m_ext_efield[2]),
490 FloatType_c(m_kT), FloatType_c(m_valency));
491
493 std::make_unique<DiffusiveFluxKernelElectrostatic>(
494 std::move(kernel_electrostatic));
495 }
496
497 void set_diffusion_kernels(LatticeWalberla const &lattice,
498 unsigned int seed) {
499 auto const grid_dim = lattice.get_grid_dimensions();
500
501 auto kernel = DiffusiveFluxKernelThermalized(
503 grid_dim[0], grid_dim[1], grid_dim[2], seed, 0);
504
505 auto kernel_electrostatic = DiffusiveFluxKernelElectrostaticThermalized(
507 FloatType_c(m_diffusion), FloatType_c(m_ext_efield[0]),
508 FloatType_c(m_ext_efield[1]), FloatType_c(m_ext_efield[2]), grid_dim[0],
509 grid_dim[1], grid_dim[2], FloatType_c(m_kT), seed, 0,
510 FloatType_c(m_valency));
511
512 auto const blocks = lattice.get_blocks();
513
514 for (auto &block : *blocks) {
515 kernel.configure(blocks, &block);
516 kernel_electrostatic.configure(blocks, &block);
517 }
518
519 m_diffusive_flux = std::make_unique<DiffusiveFluxKernel>(std::move(kernel));
521 std::make_unique<DiffusiveFluxKernelElectrostatic>(
522 std::move(kernel_electrostatic));
523 }
524
525 void kernel_boundary_density() {
526 for (auto &block : *m_lattice->get_blocks()) {
527 (*m_boundary_density)(&block);
528 }
529 }
530
531 void kernel_boundary_flux() {
532 for (auto &block : *m_lattice->get_blocks()) {
533 (*m_boundary_flux)(&block);
534 }
535 }
536
537 void kernel_continuity() {
538 for (auto &block : *m_lattice->get_blocks()) {
539 (*m_continuity).run(&block);
540 }
541 }
542
543 void kernel_diffusion() {
544 for (auto &block : *m_lattice->get_blocks()) {
545 std::visit([&block](auto &kernel) { kernel.run(&block); },
547 }
548
549 if (auto *kernel =
550 std::get_if<DiffusiveFluxKernelThermalized>(&*m_diffusive_flux)) {
551 kernel->setTime_step(kernel->getTime_step() + 1u);
552
554 std::get_if<DiffusiveFluxKernelElectrostaticThermalized>(
556 kernel_electrostatic->setTime_step(kernel_electrostatic->getTime_step() +
557 1u);
558 }
559 }
560
561 void kernel_advection(std::size_t const velocity_id) {
562 auto kernel = AdvectiveFluxKernel(m_flux_field_id, m_density_field_id,
564 for (auto &block : *m_lattice->get_blocks()) {
565 kernel.run(&block);
566 }
567 }
568
569 void kernel_friction_coupling(std::size_t const force_id,
570 double const lb_density) {
571 auto kernel = FrictionCouplingKernel(
573 FloatType_c(get_kT()), FloatType(lb_density));
574 for (auto &block : *m_lattice->get_blocks()) {
575 kernel.run(&block);
576 }
577 }
578
579 void kernel_diffusion_electrostatic(std::size_t const potential_id) {
580 auto const phiID = BlockDataID(potential_id);
581 std::visit([phiID](auto &kernel) { kernel.setPhiID(phiID); },
583
584 for (auto &block : *m_lattice->get_blocks()) {
585 std::visit([&block](auto &kernel) { kernel.run(&block); },
587 }
588
589 if (auto *kernel_electrostatic =
590 std::get_if<DiffusiveFluxKernelElectrostaticThermalized>(
592 kernel_electrostatic->setTime_step(kernel_electrostatic->getTime_step() +
593 1u);
594
595 auto *kernel =
596 std::get_if<DiffusiveFluxKernelThermalized>(&*m_diffusive_flux);
597 kernel->setTime_step(kernel->getTime_step() + 1u);
598 }
599 }
600
601 void kernel_migration() {}
602
  /** @brief Refresh both boundary handlings (flux and density). */
  void update_boundary_fields() {
    m_boundary_flux->boundary_update();
    m_boundary_density->boundary_update();
  }
607
608protected:
609 void integrate_vtk_writers() override {
610 for (auto const &it : m_vtk_auto) {
611 auto &vtk_handle = it.second;
612 if (vtk_handle->enabled) {
613 vtk::writeFiles(vtk_handle->ptr)();
614 vtk_handle->execution_count++;
615 }
616 }
617 }
618
619public:
620 void integrate(std::size_t potential_id, std::size_t velocity_id,
621 std::size_t force_id, double lb_density) override {
622
624 update_boundary_fields();
625
626 if (get_diffusion() == 0.)
627 return;
628
629 if (get_valency() != 0.) {
630 if (potential_id == walberla::BlockDataID{}) {
631 throw std::runtime_error("Walberla EK: electrostatic potential enabled "
632 "but no field accessible. potential id is " +
633 std::to_string(potential_id));
634 }
635 kernel_diffusion_electrostatic(potential_id);
636 } else {
637 kernel_diffusion();
638 }
639
640 kernel_migration();
641 kernel_boundary_flux();
642 // friction coupling
643 if (get_friction_coupling()) {
644 if (force_id == walberla::BlockDataID{}) {
645 throw std::runtime_error("Walberla EK: friction coupling enabled but "
646 "no force field accessible. force_id is " +
647 std::to_string(force_id) +
648 ". Hint: LB may be inactive.");
649 }
650 kernel_friction_coupling(force_id, lb_density);
651 }
652
653 if (get_advection()) {
654 if (velocity_id == walberla::BlockDataID{}) {
655 throw std::runtime_error("Walberla EK: advection enabled but no "
656 "velocity field accessible. velocity_id is " +
657 std::to_string(velocity_id) +
658 ". Hint: LB may be inactive.");
659 }
660 kernel_advection(velocity_id);
661 kernel_boundary_flux();
662 }
663 kernel_continuity();
664
665 // is this the expected behavior when reactions are included?
666 kernel_boundary_density();
668
669 // Handle VTK writers
671 }
672
  /** @brief Get the @c BlockDataID of the density field as an integer. */
  [[nodiscard]] std::size_t get_density_id() const noexcept override {
    static_assert(std::is_same_v<std::size_t, walberla::uint_t>);
    return static_cast<std::size_t>(m_density_field_id);
  }
677
678 bool set_node_density(Utils::Vector3i const &node, double density) override {
680 auto bc = get_block_and_cell(get_lattice(), node, false);
681 if (!bc)
682 return false;
683
684 auto density_field =
687 return true;
688 }
689
690 [[nodiscard]] std::optional<double>
692 bool consider_ghosts = false) const override {
694
695 if (!bc)
696 return std::nullopt;
697
698 auto const density_field =
701 }
702
703 [[nodiscard]] std::vector<double>
705 Utils::Vector3i const &upper_corner) const override {
706 std::vector<double> out;
707#ifndef NDEBUG
709#endif
710 auto const &lattice = get_lattice();
711 if (auto const ci = get_interval(lattice, lower_corner, upper_corner)) {
712 out = std::vector<double>(ci->numCells());
713 for (auto &block : *lattice.get_blocks()) {
714 auto const block_offset = lattice.get_block_corner(block, true);
715 if (auto const bci = get_block_interval(
717 auto const density_field =
720 assert(values.size() == bci->numCells());
721#ifndef NDEBUG
722 values_size += bci->numCells();
723#endif
724 auto kernel = [&values, &out](unsigned const block_index,
725 unsigned const local_index,
726 Utils::Vector3i const &) {
728 };
729
731 }
732 }
733 assert(values_size == ci->numCells());
734 }
735 return out;
736 }
737
740 std::vector<double> const &density) override {
742 auto const &lattice = get_lattice();
743 if (auto const ci = get_interval(lattice, lower_corner, upper_corner)) {
744 assert(density.size() == ci->numCells());
745 for (auto &block : *lattice.get_blocks()) {
746 auto const block_offset = lattice.get_block_corner(block, true);
747 if (auto const bci = get_block_interval(
749 auto const density_field =
751 std::vector<FloatType> values(bci->numCells());
752
753 auto kernel = [&values, &density](unsigned const block_index,
754 unsigned const local_index,
755 Utils::Vector3i const &) {
757 };
758
761 }
762 }
763 }
764 }
765
766 [[nodiscard]] std::optional<Utils::Vector3d>
768 bool consider_ghosts = false) const override {
770
771 if (!bc)
772 return std::nullopt;
773
774 auto const flux_field =
775 bc->block->template getData<FluxField>(m_flux_field_id);
777 }
778
779 std::vector<double>
781 Utils::Vector3i const &upper_corner) const override {
782 std::vector<double> out;
783#ifndef NDEBUG
785#endif
786 auto const &lattice = get_lattice();
787 if (auto const ci = get_interval(lattice, lower_corner, upper_corner)) {
788 out = std::vector<double>(3u * ci->numCells());
789 for (auto &block : *lattice.get_blocks()) {
790 auto const block_offset = lattice.get_block_corner(block, true);
791 if (auto const bci = get_block_interval(
793 auto const flux_field =
796 assert(values.size() == 3u * bci->numCells());
797#ifndef NDEBUG
798 values_size += 3u * bci->numCells();
799#endif
800
801 auto kernel = [&values, &out, this](unsigned const block_index,
802 unsigned const local_index,
803 Utils::Vector3i const &node) {
804 if (m_boundary_flux->node_is_boundary(node)) {
805 auto const &vec =
806 m_boundary_flux->get_node_value_at_boundary(node);
807 for (uint_t f = 0u; f < 3u; ++f) {
808 out[3u * local_index + f] = double_c(vec[f]);
809 }
810 } else {
811 for (uint_t f = 0u; f < 3u; ++f) {
812 out[3u * local_index + f] =
813 double_c(values[3u * block_index + f]);
814 }
815 }
816 };
817
819 }
820 }
821 assert(values_size == 3u * ci->numCells());
822 }
823 return out;
824 }
825
830
831 void clear_density_boundaries() override {
833 }
834
836 Utils::Vector3d const &flux) override {
838 auto bc = get_block_and_cell(get_lattice(), node, true);
839 if (!bc)
840 return false;
841
842 m_boundary_flux->set_node_value_at_boundary(
843 node, to_vector3<FloatType>(flux), *bc);
844 return true;
845 }
846
847 [[nodiscard]] std::optional<Utils::Vector3d>
849 bool consider_ghosts = false) const override {
851 auto const bc = get_block_and_cell(get_lattice(), node, consider_ghosts);
852 if (!bc or !m_boundary_flux->node_is_boundary(node))
853 return std::nullopt;
854
855 return {to_vector3d(m_boundary_flux->get_node_value_at_boundary(node))};
856 }
857
860 auto bc = get_block_and_cell(get_lattice(), node, true);
861 if (!bc)
862 return false;
863
864 m_boundary_flux->remove_node_from_boundary(node, *bc);
865 return true;
866 }
867
869 double density) override {
870 auto bc = get_block_and_cell(get_lattice(), node, true);
871 if (!bc)
872 return false;
873
874 m_boundary_density->set_node_value_at_boundary(node, FloatType_c(density),
875 *bc);
876
877 return true;
878 }
879
880 [[nodiscard]] std::optional<double>
882 bool consider_ghosts = false) const override {
883 auto const bc = get_block_and_cell(get_lattice(), node, consider_ghosts);
884 if (!bc or !m_boundary_density->node_is_boundary(node))
885 return std::nullopt;
886
887 return {double_c(m_boundary_density->get_node_value_at_boundary(node))};
888 }
889
892 std::vector<std::optional<double>> const &density) override {
893 auto const &lattice = get_lattice();
894 if (auto const ci = get_interval(lattice, lower_corner, upper_corner)) {
895 auto const local_offset = std::get<0>(lattice.get_local_grid_range());
896 auto const lower_cell = ci->min();
897 auto const upper_cell = ci->max();
898 auto it = density.begin();
899 assert(density.size() == ci->numCells());
900 for (auto x = lower_cell.x(); x <= upper_cell.x(); ++x) {
901 for (auto y = lower_cell.y(); y <= upper_cell.y(); ++y) {
902 for (auto z = lower_cell.z(); z <= upper_cell.z(); ++z) {
903 auto const node = local_offset + Utils::Vector3i{{x, y, z}};
904 auto const bc = get_block_and_cell(lattice, node, false);
905 auto const &opt = *it;
906 if (opt) {
907 m_boundary_density->set_node_value_at_boundary(
908 node, FloatType_c(*opt), *bc);
909 } else {
910 m_boundary_density->remove_node_from_boundary(node, *bc);
911 }
912 ++it;
913 }
914 }
915 }
916 }
917 }
918
919 [[nodiscard]] std::vector<std::optional<double>>
922 Utils::Vector3i const &upper_corner) const override {
923 std::vector<std::optional<double>> out;
924 auto const &lattice = get_lattice();
925 if (auto const ci = get_interval(lattice, lower_corner, upper_corner)) {
926 auto const local_offset = std::get<0>(lattice.get_local_grid_range());
927 auto const lower_cell = ci->min();
928 auto const upper_cell = ci->max();
929 auto const n_values = ci->numCells();
930 out.reserve(n_values);
931 for (auto x = lower_cell.x(); x <= upper_cell.x(); ++x) {
932 for (auto y = lower_cell.y(); y <= upper_cell.y(); ++y) {
933 for (auto z = lower_cell.z(); z <= upper_cell.z(); ++z) {
934 auto const node = local_offset + Utils::Vector3i{{x, y, z}};
935 if (m_boundary_density->node_is_boundary(node)) {
936 out.emplace_back(double_c(
937 m_boundary_density->get_node_value_at_boundary(node)));
938 } else {
939 out.emplace_back(std::nullopt);
940 }
941 }
942 }
943 }
944 assert(out.size() == n_values);
945 }
946 return out;
947 }
948
951 std::vector<std::optional<Utils::Vector3d>> const &flux) override {
953 auto const &lattice = get_lattice();
954 if (auto const ci = get_interval(lattice, lower_corner, upper_corner)) {
955 auto const local_offset = std::get<0>(lattice.get_local_grid_range());
956 auto const lower_cell = ci->min();
957 auto const upper_cell = ci->max();
958 auto it = flux.begin();
959 assert(flux.size() == ci->numCells());
960 for (auto x = lower_cell.x(); x <= upper_cell.x(); ++x) {
961 for (auto y = lower_cell.y(); y <= upper_cell.y(); ++y) {
962 for (auto z = lower_cell.z(); z <= upper_cell.z(); ++z) {
963 auto const node = local_offset + Utils::Vector3i{{x, y, z}};
964 auto const bc = get_block_and_cell(lattice, node, false);
965 auto const &opt = *it;
966 if (opt) {
967 m_boundary_flux->set_node_value_at_boundary(
968 node, to_vector3<FloatType>(*opt), *bc);
969 } else {
970 m_boundary_flux->remove_node_from_boundary(node, *bc);
971 }
972 ++it;
973 }
974 }
975 }
976 }
977 }
978
979 [[nodiscard]] std::vector<std::optional<Utils::Vector3d>>
982 Utils::Vector3i const &upper_corner) const override {
983 std::vector<std::optional<Utils::Vector3d>> out;
984 auto const &lattice = get_lattice();
985 if (auto const ci = get_interval(lattice, lower_corner, upper_corner)) {
986 auto const local_offset = std::get<0>(lattice.get_local_grid_range());
987 auto const lower_cell = ci->min();
988 auto const upper_cell = ci->max();
989 auto const n_values = ci->numCells();
990 out.reserve(n_values);
991 for (auto x = lower_cell.x(); x <= upper_cell.x(); ++x) {
992 for (auto y = lower_cell.y(); y <= upper_cell.y(); ++y) {
993 for (auto z = lower_cell.z(); z <= upper_cell.z(); ++z) {
994 auto const node = local_offset + Utils::Vector3i{{x, y, z}};
995 if (m_boundary_flux->node_is_boundary(node)) {
996 out.emplace_back(to_vector3d(
997 m_boundary_flux->get_node_value_at_boundary(node)));
998 } else {
999 out.emplace_back(std::nullopt);
1000 }
1001 }
1002 }
1003 }
1004 assert(out.size() == n_values);
1005 }
1006 return out;
1007 }
1008
1009 [[nodiscard]] std::vector<bool>
1011 Utils::Vector3i const &upper_corner) const override {
1012 std::vector<bool> out;
1013 auto const &lattice = get_lattice();
1014 if (auto const ci = get_interval(lattice, lower_corner, upper_corner)) {
1015 auto const local_offset = std::get<0>(lattice.get_local_grid_range());
1016 auto const lower_cell = ci->min();
1017 auto const upper_cell = ci->max();
1018 auto const n_values = ci->numCells();
1019 out.reserve(n_values);
1020 for (auto x = lower_cell.x(); x <= upper_cell.x(); ++x) {
1021 for (auto y = lower_cell.y(); y <= upper_cell.y(); ++y) {
1022 for (auto z = lower_cell.z(); z <= upper_cell.z(); ++z) {
1023 auto const node = local_offset + Utils::Vector3i{{x, y, z}};
1024 out.emplace_back(m_boundary_density->node_is_boundary(node) or
1025 m_boundary_flux->node_is_boundary(node));
1026 }
1027 }
1028 }
1029 assert(out.size() == n_values);
1030 }
1031 return out;
1032 }
1033
1035 auto bc = get_block_and_cell(get_lattice(), node, true);
1036 if (!bc)
1037 return false;
1038
1039 m_boundary_density->remove_node_from_boundary(node, *bc);
1040
1041 return true;
1042 }
1043
1044 [[nodiscard]] std::optional<bool>
1046 bool consider_ghosts) const override {
1049 if (!bc)
1050 return std::nullopt;
1051
1052 return {m_boundary_flux->node_is_boundary(node)};
1053 }
1054
1055 [[nodiscard]] std::optional<bool>
1057 bool consider_ghosts) const override {
1059 if (!bc)
1060 return std::nullopt;
1061
1062 return {m_boundary_density->node_is_boundary(node)};
1063 }
1064
1065 [[nodiscard]] std::optional<bool>
1067 bool consider_ghosts = false) const override {
1069 if (!bc)
1070 return std::nullopt;
1071
1072 return {m_boundary_density->node_is_boundary(node) or
1073 m_boundary_flux->node_is_boundary(node)};
1074 }
1075
1077 const std::vector<int> &raster_flat,
1078 const std::vector<double> &data_flat) override {
1080 auto const grid_size = get_lattice().get_grid_dimensions();
1081 auto const data = fill_3D_vector_array(data_flat, grid_size);
1084 }
1085
1087 const std::vector<int> &raster_flat,
1088 const std::vector<double> &data_flat) override {
1089 auto const grid_size = get_lattice().get_grid_dimensions();
1090 auto const data = fill_3D_scalar_array(data_flat, grid_size);
1092 data);
1094 }
1095
1097
1099 m_boundary_density->boundary_update();
1100 }
1101
1103 return *m_lattice;
1104 }
1105
1106 [[nodiscard]] bool is_gpu() const noexcept override {
1108 }
1109
1110 void register_vtk_field_filters(walberla::vtk::VTKOutput &vtk_obj) override {
1111 field::FlagFieldCellFilter<FlagField> fluid_filter(m_flag_field_density_id);
1112 fluid_filter.addFlag(Boundary_flag);
1113 vtk_obj.addCellExclusionFilter(fluid_filter);
1114 }
1115
1116protected:
1117 template <typename Field_T, uint_t F_SIZE_ARG, typename OutputType>
1118 class VTKWriter : public vtk::BlockCellDataWriter<OutputType, F_SIZE_ARG> {
1119 public:
1120 VTKWriter(ConstBlockDataID const &block_id, std::string const &id,
1121 FloatType unit_conversion)
1122 : vtk::BlockCellDataWriter<OutputType, F_SIZE_ARG>(id),
1125
1126 protected:
1127 void configure() override {
1129 m_field = this->block_->template getData<Field_T>(m_block_id);
1130 }
1131
1134 FloatType const m_conversion;
1135 };
1136
1137#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
1138 template <typename OutputType = float>
1139 class DensityVTKWriter : public VTKWriter<DensityFieldCpu, 1u, OutputType> {
1140 public:
1142 using Base::Base;
1143 using Base::evaluate;
1144
1145 protected:
1146 OutputType evaluate(cell_idx_t const x, cell_idx_t const y,
1147 cell_idx_t const z, cell_idx_t const) override {
1149 auto const density = ek::accessor::Scalar::get(this->m_field, {x, y, z});
1151 }
1152 };
1153#else
1154 template <typename OutputType = float>
1155 class DensityVTKWriter : public VTKWriter<DensityField, 1u, OutputType> {
1156 public:
1158 using Base::Base;
1159 using Base::evaluate;
1160
1161 protected:
1163 cell_idx_t const z, cell_idx_t const) override {
1165 auto const density = ek::accessor::Scalar::get(this->m_field, {x, y, z});
1167 }
1168 };
1169#endif
1170
1171#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
1172 template <typename OutputType = float>
1173 class FluxVTKWriter : public VTKWriter<FluxFieldCpu, 3u, OutputType> {
1174 public:
1176 using Base::Base;
1177 using Base::evaluate;
1178
1179 protected:
1180 OutputType evaluate(cell_idx_t const x, cell_idx_t const y,
1181 cell_idx_t const z, cell_idx_t const f) override {
1183 auto const flux =
1184 ek::accessor::Flux::get_vector(this->m_field, {x, y, z});
1186 }
1187 };
1188#else
1189 template <typename OutputType = float>
1190 class FluxVTKWriter : public VTKWriter<FluxField, 3u, OutputType> {
1191 public:
1193 using Base::Base;
1194 using Base::evaluate;
1195
1196 protected:
1198 cell_idx_t const z, cell_idx_t const f) override {
1200 auto const flux =
1201 ek::accessor::Flux::get_vector(this->m_field, {x, y, z});
1203 }
1204 };
1205#endif
1206
1207public:
1208 void register_vtk_field_writers(walberla::vtk::VTKOutput &vtk_obj,
1210 int flag_observables) override {
1211#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
1212 auto const allocate_cpu_field_if_empty =
1213 [&]<typename Field>(auto const &blocks, std::string name,
1214 std::optional<BlockDataID> &cpu_field) {
1215 if (not cpu_field) {
1216 cpu_field = field::addToStorage<Field>(
1217 blocks, name, FloatType{0}, field::fzyx,
1218 m_lattice->get_ghost_layers(), m_host_field_allocator);
1219 }
1220 };
1221#endif
1222 if (flag_observables & static_cast<int>(EKOutputVTK::density)) {
1223 auto const unit_conversion = FloatType_c(units.at("density"));
1224#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
1225 if constexpr (Architecture == lbmpy::Arch::GPU) {
1226 auto const &blocks = m_lattice->get_blocks();
1228 blocks, "density_cpu", m_density_cpu_field_id);
1229 vtk_obj.addBeforeFunction(
1230 gpu::fieldCpyFunctor<DensityFieldCpu, DensityField>(
1232 vtk_obj.addCellDataWriter(make_shared<DensityVTKWriter<float>>(
1234 } else {
1235#endif
1236 vtk_obj.addCellDataWriter(make_shared<DensityVTKWriter<float>>(
1237 m_density_field_id, "density", unit_conversion));
1238#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
1239 }
1240#endif
1241 }
1242 if (flag_observables & static_cast<int>(EKOutputVTK::flux)) {
1243 auto const unit_conversion = FloatType_c(units.at("flux"));
1244#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
1245 if constexpr (Architecture == lbmpy::Arch::GPU) {
1246 auto const &blocks = m_lattice->get_blocks();
1248 blocks, "flux_cpu", m_flux_cpu_field_id);
1249 vtk_obj.addBeforeFunction(gpu::fieldCpyFunctor<FluxFieldCpu, FluxField>(
1251 vtk_obj.addCellDataWriter(make_shared<FluxVTKWriter<float>>(
1253 } else {
1254#endif
1255 vtk_obj.addCellDataWriter(make_shared<FluxVTKWriter<float>>(
1257#if defined(__CUDACC__) and defined(WALBERLA_BUILD_WITH_CUDA)
1258 }
1259#endif
1260 }
1261 }
1262
1263 ~EKinWalberlaImpl() override = default;
1264};
1265
1266} // namespace walberla
Vector implementation and trait types for boost qvm interoperability.
Interface of a lattice-based electrokinetic model.
std::map< std::string, std::shared_ptr< VTKHandle > > m_vtk_auto
VTK writers that are executed automatically.
std::unordered_map< std::string, double > units_map
Class that runs and controls the BlockForest in waLBerla.
walberla::blockforest::StructuredBlockForest Lattice_T
std::pair< Utils::Vector3i, Utils::Vector3i > get_local_grid_range(bool with_halo=false) const
Utils::Vector3i get_block_corner(IBlock const &block, bool lower) const
Boundary class optimized for sparse data.
VTKWriter< DensityField, 1u, OutputType > Base
OutputType evaluate(cell_idx_t const x, cell_idx_t const y, cell_idx_t const z, cell_idx_t const) override
OutputType evaluate(cell_idx_t const x, cell_idx_t const y, cell_idx_t const z, cell_idx_t const f) override
VTKWriter< FluxField, 3u, OutputType > Base
VTKWriter(ConstBlockDataID const &block_id, std::string const &id, FloatType unit_conversion)
Class that runs and controls the EK on waLBerla.
~EKinWalberlaImpl() override=default
void set_slice_flux_boundary(Utils::Vector3i const &lower_corner, Utils::Vector3i const &upper_corner, std::vector< std::optional< Utils::Vector3d > > const &flux) override
void set_kT(double kT) override
std::optional< double > get_node_density_at_boundary(Utils::Vector3i const &node, bool consider_ghosts=false) const override
double get_kT() const noexcept override
std::unique_ptr< DiffusiveFluxKernelElectrostatic > m_diffusive_flux_electrostatic
walberla::FlagField< walberla::uint8_t > FlagField
void set_friction_coupling(bool friction_coupling) override
void set_rng_state(uint64_t counter) override
void integrate(std::size_t potential_id, std::size_t velocity_id, std::size_t force_id, double lb_density) override
bool set_node_density_boundary(Utils::Vector3i const &node, double density) override
void set_slice_density(Utils::Vector3i const &lower_corner, Utils::Vector3i const &upper_corner, std::vector< double > const &density) override
std::shared_ptr< FullCommunicator > m_full_communication
std::bitset< GhostComm::SIZE > m_pending_ghost_comm
std::vector< double > get_slice_density(Utils::Vector3i const &lower_corner, Utils::Vector3i const &upper_corner) const override
void update_density_boundary_from_shape(const std::vector< int > &raster_flat, const std::vector< double > &data_flat) override
void update_flux_boundary_from_shape(const std::vector< int > &raster_flat, const std::vector< double > &data_flat) override
std::vector< bool > get_slice_is_boundary(Utils::Vector3i const &lower_corner, Utils::Vector3i const &upper_corner) const override
void set_ext_efield(Utils::Vector3d const &field) override
bool set_node_density(Utils::Vector3i const &node, double density) override
double get_diffusion() const noexcept override
void set_valency(double valency) override
bool is_thermalized() const noexcept override
auto add_to_storage(std::string const tag, FloatType value)
Convenience function to add a field with a custom allocator.
std::optional< double > get_node_density(Utils::Vector3i const &node, bool consider_ghosts=false) const override
std::size_t get_density_id() const noexcept override
unsigned int get_seed() const noexcept override
std::unique_ptr< ContinuityKernel > m_continuity
double get_valency() const noexcept override
typename FieldTrait< FloatType, Architecture >::FluxField FluxField
std::shared_ptr< BoundaryModelFlux > m_boundary_flux
void set_slice_density_boundary(Utils::Vector3i const &lower_corner, Utils::Vector3i const &upper_corner, std::vector< std::optional< double > > const &density) override
void reset_flux_boundary_handling(std::shared_ptr< BlockStorage > const &blocks)
bool get_advection() const noexcept override
std::optional< Utils::Vector3d > get_node_flux_vector(Utils::Vector3i const &node, bool consider_ghosts=false) const override
typename FieldTrait< FloatType, Architecture >::template BoundaryCommScheme< typename stencil::D3Q27 > BoundaryFullCommunicator
typename FieldTrait< FloatType, Architecture >::template RegularCommScheme< typename stencil::D3Q27 > FullCommunicator
std::optional< uint64_t > get_rng_state() const override
void set_advection(bool advection) override
bool remove_node_from_density_boundary(Utils::Vector3i const &node) override
typename FieldTrait< FloatType, Architecture >::template PackInfo< Field > PackInfo
bool get_friction_coupling() const noexcept override
Utils::Vector3d get_ext_efield() const noexcept override
stencil::D3Q27 Stencil
Stencil for collision and streaming operations.
LatticeWalberla const & get_lattice() const noexcept override
bool remove_node_from_flux_boundary(Utils::Vector3i const &node) override
bool is_double_precision() const noexcept override
typename FieldTrait< FloatType, Architecture >::DensityField DensityField
bool is_gpu() const noexcept override
void clear_density_boundaries() override
std::optional< bool > get_node_is_flux_boundary(Utils::Vector3i const &node, bool consider_ghosts) const override
LatticeWalberla::Lattice_T BlockStorage
Lattice model (e.g.
void reset_density_boundary_handling(std::shared_ptr< BlockStorage > const &blocks)
std::vector< std::optional< Utils::Vector3d > > get_slice_flux_at_boundary(Utils::Vector3i const &lower_corner, Utils::Vector3i const &upper_corner) const override
ResourceObserver m_mpi_cart_comm_observer
std::vector< std::optional< double > > get_slice_density_at_boundary(Utils::Vector3i const &lower_corner, Utils::Vector3i const &upper_corner) const override
void register_vtk_field_writers(walberla::vtk::VTKOutput &vtk_obj, LatticeModel::units_map const &units, int flag_observables) override
std::optional< Utils::Vector3d > get_node_flux_at_boundary(Utils::Vector3i const &node, bool consider_ghosts=false) const override
std::vector< double > get_slice_flux_vector(Utils::Vector3i const &lower_corner, Utils::Vector3i const &upper_corner) const override
typename FieldTrait< FloatType >::DensityField _DensityField
std::shared_ptr< BoundaryFullCommunicator > m_boundary_communicator
std::optional< bool > get_node_is_density_boundary(Utils::Vector3i const &node, bool consider_ghosts) const override
std::unique_ptr< DiffusiveFluxKernel > m_diffusive_flux
void set_diffusion(double diffusion) override
std::size_t stencil_size() const noexcept override
FlagUID const Boundary_flag
Flag for boundary cells.
bool set_node_flux_boundary(Utils::Vector3i const &node, Utils::Vector3d const &flux) override
typename FieldTrait< FloatType >::FluxField _FluxField
std::optional< bool > get_node_is_boundary(Utils::Vector3i const &node, bool consider_ghosts=false) const override
std::unique_ptr< BoundaryModelDensity > m_boundary_density
EKinWalberlaImpl(std::shared_ptr< LatticeWalberla > lattice, double diffusion, double kT, double valency, Utils::Vector3d const &ext_efield, double density, bool advection, bool friction_coupling, bool thermalized, unsigned int seed)
std::shared_ptr< LatticeWalberla > m_lattice
Block forest.
FlagUID const Domain_flag
Flag for domain cells, i.e.
void register_vtk_field_filters(walberla::vtk::VTKOutput &vtk_obj) override
Base class for LB field VTK writers.
void setup_boundary_handle(std::shared_ptr< LatticeWalberla > lattice, std::shared_ptr< Boundary_T > boundary)
cudaStream_t stream[1]
CUDA streams for parallel computing on CPU and GPU.
static double * block(double *p, std::size_t index, std::size_t size)
Definition elc.cpp:177
STL namespace.
auto get_vector(GhostLayerField< double, uint_t{13u}> const *flux_field, Cell const &cell)
void initialize(GhostLayerField< double, uint_t{13u}> *flux_field, std::array< double, 13 > const &values)
void initialize(GhostLayerField< double, 1u > *scalar_field, double const &value)
void set(GhostLayerField< double, 1u > *scalar_field, double const &value, Cell const &cell)
auto get(GhostLayerField< double, 1u > const *scalar_field, Cell const &cell)
static FUNC_PREFIX double *RESTRICT const double *RESTRICT const int64_t const int64_t const int64_t const int64_t const int64_t const int64_t const int64_t const int64_t const int64_t const int64_t const uint32_t uint32_t uint32_t uint32_t uint32_t uint32_t uint32_t seed
\file PackInfoPdfDoublePrecision.cpp \author pystencils
auto to_vector3d(Vector3< T > const &v) noexcept
std::vector< double > fill_3D_scalar_array(std::vector< double > const &vec_flat, Utils::Vector3i const &grid_size)
Definition boundary.hpp:60
void set_boundary_from_grid(BoundaryModel &boundary, LatticeWalberla const &lattice, std::vector< int > const &raster_flat, std::vector< DataType > const &data_flat)
Definition boundary.hpp:81
void copy_block_buffer(CellInterval const &bci, CellInterval const &ci, Utils::Vector3i const &block_offset, Utils::Vector3i const &lower_corner, auto &&kernel)
Synchronize data between a sliced block and a container.
std::optional< BlockAndCell > get_block_and_cell(::LatticeWalberla const &lattice, signed_integral_vector auto const &node, bool consider_ghost_layers)
Cell to_cell(signed_integral_vector auto const &xyz)
ResourceObserver get_mpi_cart_comm_observer()
Get an observer on waLBerla's MPI Cartesian communicator status.
std::optional< walberla::cell::CellInterval > get_block_interval(::LatticeWalberla const &lattice, Utils::Vector3i const &lower_corner, Utils::Vector3i const &upper_corner, Utils::Vector3i const &block_offset, IBlock const &block)
std::optional< walberla::cell::CellInterval > get_interval(::LatticeWalberla const &lattice, Utils::Vector3i const &lower_corner, Utils::Vector3i const &upper_corner)
std::vector< Utils::Vector3d > fill_3D_vector_array(std::vector< double > const &vec_flat, Utils::Vector3i const &grid_size)
Definition boundary.hpp:36
Observer to monitor the lifetime of a shared resource.
blockforest::communication::UniformBufferedScheme< Stencil > RegularCommScheme
GhostLayerField< FT, FluxCount > FluxField
field::communication::PackInfo< Field > PackInfo
blockforest::communication::UniformBufferedScheme< Stencil > BoundaryCommScheme
GhostCommFlags
Ghost communication operations.