ESPResSo
Extensible Simulation Package for Research on Soft Matter Systems
DiffusiveFluxKernelWithElectrostaticThermalized_double_precision_CUDA.h
//======================================================================================================================
//
// This file is part of waLBerla. waLBerla is free software: you can
// redistribute it and/or modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// waLBerla is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License along
// with waLBerla (see COPYING.txt). If not, see <http://www.gnu.org/licenses/>.
//
//! \\file
//! DiffusiveFluxKernelWithElectrostaticThermalized_double_precision_CUDA.h
//! \\author pystencils
//======================================================================================================================

// kernel generated with pystencils v1.4+1.ge851f4e, lbmpy v1.4+1.ge9efe34,
// sympy v1.12.1, lbmpy_walberla/pystencils_walberla from waLBerla commit
// 007e77e077ad9d22b5eed6f3d3118240993e553c

#pragma once
#include "core/DataTypes.h"
#include "core/logging/Logging.h"

#include "gpu/GPUField.h"
#include "gpu/GPUWrapper.h"

#include "domain_decomposition/BlockDataID.h"
#include "domain_decomposition/IBlock.h"
#include "domain_decomposition/StructuredBlockStorage.h"
#include "field/SwapableCompare.h"

#include <functional>
#include <unordered_map>

#ifdef __GNUC__
#define RESTRICT __restrict__
#else
#define RESTRICT
#endif

#if (defined WALBERLA_CXX_COMPILER_IS_GNU) || \
    (defined WALBERLA_CXX_COMPILER_IS_CLANG)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wreorder"
#endif

namespace walberla {
namespace pystencils {

class DiffusiveFluxKernelWithElectrostaticThermalized_double_precision_CUDA {
public:
  DiffusiveFluxKernelWithElectrostaticThermalized_double_precision_CUDA(
      BlockDataID jID_, BlockDataID phiID_, BlockDataID rhoID_, double D,
      double f_ext_0, double f_ext_1, double f_ext_2, uint32_t field_size_0,
      uint32_t field_size_1, uint32_t field_size_2, double kT, uint32_t seed,
      uint32_t time_step, double z)
      : jID(jID_), phiID(phiID_), rhoID(rhoID_), D_(D), f_ext_0_(f_ext_0),
        f_ext_1_(f_ext_1), f_ext_2_(f_ext_2), field_size_0_(field_size_0),
        field_size_1_(field_size_1), field_size_2_(field_size_2), kT_(kT),
        seed_(seed), time_step_(time_step), z_(z), block_offset_0_(uint32_t(0)),
        block_offset_1_(uint32_t(0)), block_offset_2_(uint32_t(0)),
        configured_(false) {}

  void run(IBlock *block, gpuStream_t stream = nullptr);

  void runOnCellInterval(const shared_ptr<StructuredBlockStorage> &blocks,
                         const CellInterval &globalCellInterval,
                         cell_idx_t ghostLayers, IBlock *block,
                         gpuStream_t stream = nullptr);

  void operator()(IBlock *block, gpuStream_t stream = nullptr) {
    run(block, stream);
  }

  static std::function<void(IBlock *)> getSweep(
      const shared_ptr<
          DiffusiveFluxKernelWithElectrostaticThermalized_double_precision_CUDA>
          &kernel) {
    return [kernel](IBlock *b) { kernel->run(b); };
  }

  static std::function<void(IBlock *, gpuStream_t)> getSweepOnCellInterval(
      const shared_ptr<
          DiffusiveFluxKernelWithElectrostaticThermalized_double_precision_CUDA>
          &kernel,
      const shared_ptr<StructuredBlockStorage> &blocks,
      const CellInterval &globalCellInterval, cell_idx_t ghostLayers = 1) {
    return [kernel, blocks, globalCellInterval,
            ghostLayers](IBlock *b, gpuStream_t stream = nullptr) {
      kernel->runOnCellInterval(blocks, globalCellInterval, ghostLayers, b,
                                stream);
    };
  }

  std::function<void(IBlock *)> getSweep(gpuStream_t stream = nullptr) {
    return [this, stream](IBlock *b) { this->run(b, stream); };
  }

  std::function<void(IBlock *)>
  getSweepOnCellInterval(const shared_ptr<StructuredBlockStorage> &blocks,
                         const CellInterval &globalCellInterval,
                         cell_idx_t ghostLayers = 1,
                         gpuStream_t stream = nullptr) {
    return [this, blocks, globalCellInterval, ghostLayers, stream](IBlock *b) {
      this->runOnCellInterval(blocks, globalCellInterval, ghostLayers, b,
                              stream);
    };
  }

  void configure(const shared_ptr<StructuredBlockStorage> &blocks,
                 IBlock *block) {
    Cell BlockCellBB = blocks->getBlockCellBB(*block).min();
    block_offset_0_ = uint32_t(BlockCellBB[0]);
    block_offset_1_ = uint32_t(BlockCellBB[1]);
    block_offset_2_ = uint32_t(BlockCellBB[2]);
    configured_ = true;
  }

  inline double getD() const { return D_; }
  inline uint32_t getBlock_offset_0() const { return block_offset_0_; }
  inline uint32_t getBlock_offset_1() const { return block_offset_1_; }
  inline uint32_t getBlock_offset_2() const { return block_offset_2_; }
  inline double getF_ext_0() const { return f_ext_0_; }
  inline double getF_ext_1() const { return f_ext_1_; }
  inline double getF_ext_2() const { return f_ext_2_; }
  inline uint32_t getField_size_0() const { return field_size_0_; }
  inline uint32_t getField_size_1() const { return field_size_1_; }
  inline uint32_t getField_size_2() const { return field_size_2_; }
  inline double getKt() const { return kT_; }
  inline uint32_t getSeed() const { return seed_; }
  inline uint32_t getTime_step() const { return time_step_; }
  inline double getZ() const { return z_; }
  inline void setD(const double value) { D_ = value; }
  inline void setBlock_offset_0(const uint32_t value) {
    block_offset_0_ = value;
  }
  inline void setBlock_offset_1(const uint32_t value) {
    block_offset_1_ = value;
  }
  inline void setBlock_offset_2(const uint32_t value) {
    block_offset_2_ = value;
  }
  inline void setF_ext_0(const double value) { f_ext_0_ = value; }
  inline void setF_ext_1(const double value) { f_ext_1_ = value; }
  inline void setF_ext_2(const double value) { f_ext_2_ = value; }
  inline void setField_size_0(const uint32_t value) { field_size_0_ = value; }
  inline void setField_size_1(const uint32_t value) { field_size_1_ = value; }
  inline void setField_size_2(const uint32_t value) { field_size_2_ = value; }
  inline void setKt(const double value) { kT_ = value; }
  inline void setSeed(const uint32_t value) { seed_ = value; }
  inline void setTime_step(const uint32_t value) { time_step_ = value; }
  inline void setZ(const double value) { z_ = value; }

private:
  BlockDataID jID;
  BlockDataID phiID;

public:
  inline void setPhiID(BlockDataID phiID_) { phiID = phiID_; }

private:
  BlockDataID rhoID;
  double D_;
  uint32_t block_offset_0_;
  uint32_t block_offset_1_;
  uint32_t block_offset_2_;
  double f_ext_0_;
  double f_ext_1_;
  double f_ext_2_;
  uint32_t field_size_0_;
  uint32_t field_size_1_;
  uint32_t field_size_2_;
  double kT_;
  uint32_t seed_;
  uint32_t time_step_;
  double z_;

  bool configured_;
};

} // namespace pystencils
} // namespace walberla

#if (defined WALBERLA_CXX_COMPILER_IS_GNU) || \
    (defined WALBERLA_CXX_COMPILER_IS_CLANG)
#pragma GCC diagnostic pop
#endif
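
Usage sketch (not part of the generated header): the snippet below illustrates how a sweep class like this one can be driven from host code, using only the members declared above. The block storage blocks, the field IDs jId, phiId, rhoId, the function name sweepDiffusiveFlux, and all numeric parameter values are hypothetical placeholders for illustration, not part of the file being documented.

#include "DiffusiveFluxKernelWithElectrostaticThermalized_double_precision_CUDA.h"

#include <cstdint>

// Hypothetical driver: runs the thermalized diffusive-flux sweep once over
// all local blocks. Parameter values (D, kT, z, f_ext_*, field sizes, seed)
// are placeholders and depend on the actual simulation setup.
void sweepDiffusiveFlux(
    const walberla::shared_ptr<walberla::StructuredBlockStorage> &blocks,
    walberla::BlockDataID jId, walberla::BlockDataID phiId,
    walberla::BlockDataID rhoId, uint32_t timeStep) {
  using namespace walberla;

  pystencils::
      DiffusiveFluxKernelWithElectrostaticThermalized_double_precision_CUDA
          flux(jId, phiId, rhoId, /*D=*/0.1, /*f_ext_0=*/0.0, /*f_ext_1=*/0.0,
               /*f_ext_2=*/0.0, /*field_size_0=*/64u, /*field_size_1=*/64u,
               /*field_size_2=*/64u, /*kT=*/1.0, /*seed=*/42u,
               /*time_step=*/timeStep, /*z=*/1.0);

  for (auto &block : *blocks) {
    // configure() caches this block's global cell offset, which the generated
    // kernel uses together with seed and time_step when drawing the noise.
    flux.configure(blocks, &block);
    flux(&block); // operator() forwards to run(), launching the CUDA kernel
  }
}

For time-loop integration, getSweep() returns a std::function<void(IBlock *)> that can be registered as a sweep, and the gpuStream_t overloads allow the kernel launch to be placed on a non-default CUDA stream.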