ESPResSo
Extensible Simulation Package for Research on Soft Matter Systems
StreamCollideSweepThermalizedDoublePrecisionCUDA.h
//======================================================================================================================
//
// This file is part of waLBerla. waLBerla is free software: you can
// redistribute it and/or modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// waLBerla is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License along
// with waLBerla (see COPYING.txt). If not, see <http://www.gnu.org/licenses/>.
//
//! \file StreamCollideSweepThermalizedDoublePrecisionCUDA.h
//! \author pystencils
//======================================================================================================================

// kernel generated with pystencils v1.4+1.ge851f4e, lbmpy v1.4+1.ge9efe34,
// sympy v1.12.1, lbmpy_walberla/pystencils_walberla from waLBerla commit
// 007e77e077ad9d22b5eed6f3d3118240993e553c

#pragma once
#include "core/DataTypes.h"
#include "core/logging/Logging.h"

#include "gpu/GPUField.h"
#include "gpu/GPUWrapper.h"

#include "domain_decomposition/BlockDataID.h"
#include "domain_decomposition/IBlock.h"
#include "domain_decomposition/StructuredBlockStorage.h"
#include "field/SwapableCompare.h"

#include <functional>
#include <unordered_map>

#ifdef __GNUC__
#define RESTRICT __restrict__
#else
#define RESTRICT
#endif

#if (defined WALBERLA_CXX_COMPILER_IS_GNU) || \
    (defined WALBERLA_CXX_COMPILER_IS_CLANG)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wreorder"
#endif

namespace walberla {
namespace pystencils {

// GPU sweep, generated by pystencils/lbmpy, performing a fused stream-collide
// step of the thermalized double-precision lattice Boltzmann kernel.
class StreamCollideSweepThermalizedDoublePrecisionCUDA {
public:
  StreamCollideSweepThermalizedDoublePrecisionCUDA(
      BlockDataID forceID_, BlockDataID pdfsID_, double kT, double omega_bulk,
      double omega_even, double omega_odd, double omega_shear, uint32_t seed,
      uint32_t time_step)
      : forceID(forceID_), pdfsID(pdfsID_), kT_(kT), omega_bulk_(omega_bulk),
        omega_even_(omega_even), omega_odd_(omega_odd),
        omega_shear_(omega_shear), seed_(seed), time_step_(time_step),
        block_offset_0_(uint32_t(0)), block_offset_1_(uint32_t(0)),
        block_offset_2_(uint32_t(0)), configured_(false) {}

  // Free the GPU pdf fields cached per block (see cache_pdfs_).
  ~StreamCollideSweepThermalizedDoublePrecisionCUDA() {
    for (auto p : cache_pdfs_) {
      delete p.second;
    }
  }

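  // run() launches the generated stream-collide kernel on a whole block;
  // runOnCellInterval() launches it only on the cells of the block that lie
  // in the given global cell interval.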
  void run(IBlock *block, gpuStream_t stream = nullptr);

  void runOnCellInterval(const shared_ptr<StructuredBlockStorage> &blocks,
                         const CellInterval &globalCellInterval,
                         cell_idx_t ghostLayers, IBlock *block,
                         gpuStream_t stream = nullptr);

  void operator()(IBlock *block, gpuStream_t stream = nullptr) {
    run(block, stream);
  }

  static std::function<void(IBlock *)>
  getSweep(const shared_ptr<StreamCollideSweepThermalizedDoublePrecisionCUDA>
               &kernel) {
    return [kernel](IBlock *b) { kernel->run(b); };
  }

  static std::function<void(IBlock *, gpuStream_t)> getSweepOnCellInterval(
      const shared_ptr<StreamCollideSweepThermalizedDoublePrecisionCUDA>
          &kernel,
      const shared_ptr<StructuredBlockStorage> &blocks,
      const CellInterval &globalCellInterval, cell_idx_t ghostLayers = 1) {
    return [kernel, blocks, globalCellInterval,
            ghostLayers](IBlock *b, gpuStream_t stream = nullptr) {
      kernel->runOnCellInterval(blocks, globalCellInterval, ghostLayers, b,
                                stream);
    };
  }

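  // Factory helpers returning std::function wrappers around run() and
  // runOnCellInterval(): the static overloads above capture a shared_ptr to
  // the kernel, the overloads below bind this instance and an optional stream.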
  std::function<void(IBlock *)> getSweep(gpuStream_t stream = nullptr) {
    return [this, stream](IBlock *b) { this->run(b, stream); };
  }

  std::function<void(IBlock *)>
  getSweepOnCellInterval(const shared_ptr<StructuredBlockStorage> &blocks,
                         const CellInterval &globalCellInterval,
                         cell_idx_t ghostLayers = 1,
                         gpuStream_t stream = nullptr) {
    return [this, blocks, globalCellInterval, ghostLayers, stream](IBlock *b) {
      this->runOnCellInterval(blocks, globalCellInterval, ghostLayers, b,
                              stream);
    };
  }

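  // Record the global cell coordinates of this block's lower corner as the
  // block offsets used by the generated kernel (e.g. to derive per-cell
  // random numbers in the thermalized collision).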
  void configure(const shared_ptr<StructuredBlockStorage> &blocks,
                 IBlock *block) {
    Cell BlockCellBB = blocks->getBlockCellBB(*block).min();
    block_offset_0_ = uint32_t(BlockCellBB[0]);
    block_offset_1_ = uint32_t(BlockCellBB[1]);
    block_offset_2_ = uint32_t(BlockCellBB[2]);
    configured_ = true;
  }

  inline uint32_t getBlock_offset_0() const { return block_offset_0_; }
  inline uint32_t getBlock_offset_1() const { return block_offset_1_; }
  inline uint32_t getBlock_offset_2() const { return block_offset_2_; }
  inline double getKt() const { return kT_; }
  inline double getOmega_bulk() const { return omega_bulk_; }
  inline double getOmega_even() const { return omega_even_; }
  inline double getOmega_odd() const { return omega_odd_; }
  inline double getOmega_shear() const { return omega_shear_; }
  inline uint32_t getSeed() const { return seed_; }
  inline uint32_t getTime_step() const { return time_step_; }
  inline void setBlock_offset_0(const uint32_t value) {
    block_offset_0_ = value;
  }
  inline void setBlock_offset_1(const uint32_t value) {
    block_offset_1_ = value;
  }
  inline void setBlock_offset_2(const uint32_t value) {
    block_offset_2_ = value;
  }
  inline void setKt(const double value) { kT_ = value; }
  inline void setOmega_bulk(const double value) { omega_bulk_ = value; }
  inline void setOmega_even(const double value) { omega_even_ = value; }
  inline void setOmega_odd(const double value) { omega_odd_ = value; }
  inline void setOmega_shear(const double value) { omega_shear_ = value; }
  inline void setSeed(const uint32_t value) { seed_ = value; }
  inline void setTime_step(const uint32_t value) { time_step_ = value; }

private:
  BlockDataID forceID;
  BlockDataID pdfsID;
  uint32_t block_offset_0_;
  uint32_t block_offset_1_;
  uint32_t block_offset_2_;
  double kT_;
  double omega_bulk_;
  double omega_even_;
  double omega_odd_;
  double omega_shear_;
  uint32_t seed_;
  uint32_t time_step_;
  std::unordered_map<IBlock *, gpu::GPUField<double> *> cache_pdfs_;

  bool configured_;
};

} // namespace pystencils
} // namespace walberla

#if (defined WALBERLA_CXX_COMPILER_IS_GNU) || \
    (defined WALBERLA_CXX_COMPILER_IS_CLANG)
#pragma GCC diagnostic pop
#endif
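A minimal usage sketch, not part of the generated header: it assumes a StructuredBlockStorage named blocks and two BlockDataIDs, force_field_id and pdf_field_id, for GPU fields registered elsewhere (all three names are hypothetical, and the kT, relaxation-rate, and seed values are placeholders). Only the public interface declared above is used.

#include "StreamCollideSweepThermalizedDoublePrecisionCUDA.h"

#include <cstdint>

void thermalized_lb_step(
    const walberla::shared_ptr<walberla::StructuredBlockStorage> &blocks,
    walberla::BlockDataID force_field_id, walberla::BlockDataID pdf_field_id,
    uint32_t step) {
  using walberla::pystencils::StreamCollideSweepThermalizedDoublePrecisionCUDA;
  // kT = 0 corresponds to no thermal noise; all rates are placeholders.
  StreamCollideSweepThermalizedDoublePrecisionCUDA sweep(
      force_field_id, pdf_field_id, /*kT=*/0.0, /*omega_bulk=*/1.0,
      /*omega_even=*/1.0, /*omega_odd=*/1.0, /*omega_shear=*/1.0,
      /*seed=*/42u, /*time_step=*/step);
  for (auto &block : *blocks) {
    sweep.configure(blocks, &block); // set this block's global cell offsets
    sweep.run(&block);               // launch the fused stream-collide kernel
  }
}

In practice the sweep object would be constructed once and reused across time steps; its setters allow updating kT, the relaxation rates, the seed, and the time step between calls.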