#include "core/DataTypes.h"
#include "core/logging/Logging.h"

#include "gpu/GPUField.h"
#include "gpu/GPUWrapper.h"

#include "domain_decomposition/BlockDataID.h"
#include "domain_decomposition/IBlock.h"
#include "domain_decomposition/StructuredBlockStorage.h"
#include "field/SwapableCompare.h"

#include <functional>
#include <unordered_map>
#ifdef __GNUC__
#define RESTRICT __restrict__
#elif _MSC_VER
#define RESTRICT __restrict
#else
#define RESTRICT
#endif
#if (defined WALBERLA_CXX_COMPILER_IS_GNU) ||                                  \
    (defined WALBERLA_CXX_COMPILER_IS_CLANG)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wreorder"
#endif

namespace walberla {
namespace pystencils {

class StreamCollideSweepThermalizedSinglePrecisionCUDA {
public:
  StreamCollideSweepThermalizedSinglePrecisionCUDA(
      BlockDataID forceID_, BlockDataID pdfsID_, float kT, float omega_bulk,
      float omega_even, float omega_odd, float omega_shear, uint32_t seed,
      uint32_t time_step)
      : forceID(forceID_), pdfsID(pdfsID_), kT_(kT), omega_bulk_(omega_bulk),
        omega_even_(omega_even), omega_odd_(omega_odd),
        omega_shear_(omega_shear), seed_(seed), time_step_(time_step),
        block_offset_0_(uint32_t(0)), block_offset_1_(uint32_t(0)),
        block_offset_2_(uint32_t(0)), configured_(false) {}
  ~StreamCollideSweepThermalizedSinglePrecisionCUDA() {
    for (auto p : cache_pdfs_) {
      delete p.second;
    }
  }
  void run(IBlock *block, gpuStream_t stream = nullptr);

  void runOnCellInterval(const shared_ptr<StructuredBlockStorage> &blocks,
                         const CellInterval &globalCellInterval,
                         cell_idx_t ghostLayers, IBlock *block,
                         gpuStream_t stream = nullptr);

  void operator()(IBlock *block, gpuStream_t stream = nullptr) {
    run(block, stream);
  }
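  // Behavioural note (inferred from the signatures, not pystencils output):
  // run() launches the generated thermalized stream-collide kernel on a whole
  // block, runOnCellInterval() restricts it to the given global cell interval,
  // and the optional gpuStream_t lets callers enqueue the kernel
  // asynchronously on a user-provided CUDA stream.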
  static std::function<void(IBlock *)>
  getSweep(const shared_ptr<StreamCollideSweepThermalizedSinglePrecisionCUDA>
               &kernel) {
    return [kernel](IBlock *b) { kernel->run(b); };
  }
  static std::function<void(IBlock *, gpuStream_t)> getSweepOnCellInterval(
      const shared_ptr<StreamCollideSweepThermalizedSinglePrecisionCUDA>
          &kernel,
      const shared_ptr<StructuredBlockStorage> &blocks,
      const CellInterval &globalCellInterval, cell_idx_t ghostLayers = 1) {
    return [kernel, blocks, globalCellInterval,
            ghostLayers](IBlock *b, gpuStream_t stream = nullptr) {
      kernel->runOnCellInterval(blocks, globalCellInterval, ghostLayers, b,
                                stream);
    };
  }
  std::function<void(IBlock *)> getSweep(gpuStream_t stream = nullptr) {
    return [this, stream](IBlock *b) { this->run(b, stream); };
  }

  std::function<void(IBlock *)>
  getSweepOnCellInterval(const shared_ptr<StructuredBlockStorage> &blocks,
                         const CellInterval &globalCellInterval,
                         cell_idx_t ghostLayers = 1,
                         gpuStream_t stream = nullptr) {
    return [this, blocks, globalCellInterval, ghostLayers, stream](IBlock *b) {
      this->runOnCellInterval(blocks, globalCellInterval, ghostLayers, b,
                              stream);
    };
  }
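  // Note on the factory overloads above (inferred, not pystencils output): the
  // static versions capture a shared_ptr to the sweep and therefore keep it
  // alive for the lifetime of the returned functor, whereas the non-static
  // versions capture `this` and require the sweep object to outlive the
  // functor handed to the timeloop.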
  void configure(const shared_ptr<StructuredBlockStorage> &blocks,
                 IBlock *block) {
    Cell BlockCellBB = blocks->getBlockCellBB(*block).min();
    block_offset_0_ = uint32_t(BlockCellBB[0]);
    block_offset_1_ = uint32_t(BlockCellBB[1]);
    block_offset_2_ = uint32_t(BlockCellBB[2]);
    configured_ = true;
  }
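  // Note (assumption based on the usual thermalized-LB code-generation
  // pattern): the cached block offsets translate block-local cell indices into
  // global ones, which the kernel presumably combines with seed_ and
  // time_step_ so that the per-cell random numbers of the fluctuating
  // collision stay consistent across the block decomposition. configure()
  // should therefore be called once per block before the sweep runs.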
  inline uint32_t getBlock_offset_0() const { return block_offset_0_; }
  inline uint32_t getBlock_offset_1() const { return block_offset_1_; }
  inline uint32_t getBlock_offset_2() const { return block_offset_2_; }
  inline float getKt() const { return kT_; }
  inline float getOmega_bulk() const { return omega_bulk_; }
  inline float getOmega_even() const { return omega_even_; }
  inline float getOmega_odd() const { return omega_odd_; }
  inline float getOmega_shear() const { return omega_shear_; }
  inline uint32_t getSeed() const { return seed_; }
  inline uint32_t getTime_step() const { return time_step_; }

  inline void setBlock_offset_0(const uint32_t value) {
    block_offset_0_ = value;
  }
  inline void setBlock_offset_1(const uint32_t value) {
    block_offset_1_ = value;
  }
  inline void setBlock_offset_2(const uint32_t value) {
    block_offset_2_ = value;
  }
  inline void setKt(const float value) { kT_ = value; }
  inline void setOmega_bulk(const float value) { omega_bulk_ = value; }
  inline void setOmega_even(const float value) { omega_even_ = value; }
  inline void setOmega_odd(const float value) { omega_odd_ = value; }
  inline void setOmega_shear(const float value) { omega_shear_ = value; }
  inline void setSeed(const uint32_t value) { seed_ = value; }
  inline void setTime_step(const uint32_t value) { time_step_ = value; }
  BlockDataID forceID;
  BlockDataID pdfsID;
  uint32_t block_offset_0_;
  uint32_t block_offset_1_;
  uint32_t block_offset_2_;
  float kT_;
  float omega_bulk_;
  float omega_even_;
  float omega_odd_;
  float omega_shear_;
  uint32_t seed_;
  uint32_t time_step_;

private:
  bool configured_;
  std::unordered_map<IBlock *, gpu::GPUField<float> *> cache_pdfs_;
};

} // namespace pystencils
} // namespace walberla
#if (defined WALBERLA_CXX_COMPILER_IS_GNU) ||                                  \
    (defined WALBERLA_CXX_COMPILER_IS_CLANG)
#pragma GCC diagnostic pop
#endif
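// Usage sketch (illustrative only, not part of the generated header):
// assuming a waLBerla SweepTimeloop named `timeloop`, a StructuredBlockStorage
// `blocks`, pre-registered BlockDataIDs `forceID` and `pdfsID`, and
// application-chosen relaxation parameters, the sweep could be wired up
// roughly as follows:
//
//   auto sweep = walberla::make_shared<
//       walberla::pystencils::StreamCollideSweepThermalizedSinglePrecisionCUDA>(
//       forceID, pdfsID, kT, omega_bulk, omega_even, omega_odd, omega_shear,
//       seed, /*time_step=*/uint32_t{0u});
//   for (auto &block : *blocks)
//     sweep->configure(blocks, &block);
//   timeloop.add() << Sweep(
//       walberla::pystencils::StreamCollideSweepThermalizedSinglePrecisionCUDA::
//           getSweep(sweep),
//       "thermalized stream-collide (single precision, CUDA)");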