25#include "core/DataTypes.h"
26#include "core/logging/Logging.h"
28#include "domain_decomposition/BlockDataID.h"
29#include "domain_decomposition/IBlock.h"
30#include "domain_decomposition/StructuredBlockStorage.h"
31#include "field/GhostLayerField.h"
32#include "field/SwapableCompare.h"
35#include <unordered_map>
38#define RESTRICT __restrict__
43#if (defined WALBERLA_CXX_COMPILER_IS_GNU) || \
44 (defined WALBERLA_CXX_COMPILER_IS_CLANG)
45#pragma GCC diagnostic push
46#pragma GCC diagnostic ignored "-Wunused-parameter"
47#pragma GCC diagnostic ignored "-Wreorder"
57 double omega_even,
double omega_odd,
double omega_shear,
uint32_t seed,
60 omega_even_(omega_even), omega_odd_(omega_odd),
61 omega_shear_(omega_shear), seed_(seed), time_step_(time_step),
66 for (
auto p : cache_pdfs_) {
81 return [kernel](IBlock *b) { kernel->run(b); };
86 const shared_ptr<StructuredBlockStorage> &
blocks,
94 return [
this](IBlock *b) { this->
run(b); };
97 std::function<
void(IBlock *)>
118 inline double getKt()
const {
return kT_; }
126 block_offset_0_ = value;
129 block_offset_1_ = value;
132 block_offset_2_ = value;
134 inline void setKt(
const double value) { kT_ = value; }
155 std::unordered_map<IBlock *, field::GhostLayerField<double, 19> *>
164#if (defined WALBERLA_CXX_COMPILER_IS_GNU) || \
165 (defined WALBERLA_CXX_COMPILER_IS_CLANG)
166#pragma GCC diagnostic pop
uint32_t getBlock_offset_0() const
void setTime_step(const uint32_t value)
uint32_t getBlock_offset_2() const
void operator()(IBlock *block)
double getOmega_bulk() const
double getOmega_shear() const
std::function< void(IBlock *)> getSweep()
void setBlock_offset_1(const uint32_t value)
void setOmega_odd(const double value)
void setOmega_shear(const double value)
uint32_t getTime_step() const
void setOmega_bulk(const double value)
uint32_t getBlock_offset_1() const
double getOmega_odd() const
void setSeed(const uint32_t value)
std::function< void(IBlock *)> getSweepOnCellInterval(const shared_ptr< StructuredBlockStorage > &blocks, const CellInterval &globalCellInterval, cell_idx_t ghostLayers=1)
void runOnCellInterval(const shared_ptr< StructuredBlockStorage > &blocks, const CellInterval &globalCellInterval, cell_idx_t ghostLayers, IBlock *block)
static std::function< void(IBlock *)> getSweep(const shared_ptr< StreamCollideSweepThermalizedDoublePrecision > &kernel)
void configure(const shared_ptr< StructuredBlockStorage > &blocks, IBlock *block)
StreamCollideSweepThermalizedDoublePrecision(BlockDataID forceID_, BlockDataID pdfsID_, double kT, double omega_bulk, double omega_even, double omega_odd, double omega_shear, uint32_t seed, uint32_t time_step)
void setBlock_offset_2(const uint32_t value)
void setKt(const double value)
void setBlock_offset_0(const uint32_t value)
double getOmega_even() const
~StreamCollideSweepThermalizedDoublePrecision()
void setOmega_even(const double value)
static std::function< void(IBlock *)> getSweepOnCellInterval(const shared_ptr< StreamCollideSweepThermalizedDoublePrecision > &kernel, const shared_ptr< StructuredBlockStorage > &blocks, const CellInterval &globalCellInterval, cell_idx_t ghostLayers=1)
cudaStream_t stream[1]
CUDA streams for asynchronous kernel execution on the GPU (enqueued from the host).
static double * block(double *p, std::size_t index, std::size_t size)
\file PackInfoPdfDoublePrecision.cpp \author pystencils