#include "core/DataTypes.h"
#include "core/logging/Logging.h"

#include "gpu/GPUField.h"
#include "gpu/GPUWrapper.h"

#include "domain_decomposition/BlockDataID.h"
#include "domain_decomposition/IBlock.h"
#include "domain_decomposition/StructuredBlockStorage.h"
#include "field/SwapableCompare.h"

#include <functional>
#include <unordered_map>
// Compiler-specific restrict qualifier (guards reconstructed)
#ifdef __GNUC__
#define RESTRICT __restrict__
#elif _MSC_VER
#define RESTRICT __restrict
#else
#define RESTRICT
#endif
#if (defined WALBERLA_CXX_COMPILER_IS_GNU) ||                                  \
    (defined WALBERLA_CXX_COMPILER_IS_CLANG)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wreorder"
#endif
class ReactionKernelBulk_4_single_precision_CUDA {
public:
  ReactionKernelBulk_4_single_precision_CUDA(
      BlockDataID rho_0ID_, BlockDataID rho_1ID_, BlockDataID rho_2ID_,
      BlockDataID rho_3ID_, float order_0, float order_1, float order_2,
      float order_3, float rate_coefficient, float stoech_0, float stoech_1,
      float stoech_2, float stoech_3)
      : rho_0ID(rho_0ID_), rho_1ID(rho_1ID_), rho_2ID(rho_2ID_),
        rho_3ID(rho_3ID_), order_0_(order_0), order_1_(order_1),
        order_2_(order_2), order_3_(order_3),
        rate_coefficient_(rate_coefficient), stoech_0_(stoech_0),
        stoech_1_(stoech_1), stoech_2_(stoech_2), stoech_3_(stoech_3) {}
  void run(IBlock *block, gpuStream_t stream = nullptr);

  void runOnCellInterval(const shared_ptr<StructuredBlockStorage> &blocks,
                         const CellInterval &globalCellInterval,
                         cell_idx_t ghostLayers, IBlock *block,
                         gpuStream_t stream = nullptr);
  static std::function<void(IBlock *)> getSweep(
      const shared_ptr<ReactionKernelBulk_4_single_precision_CUDA> &kernel) {
    return [kernel](IBlock *b) { kernel->run(b); };
  }
  static std::function<void(IBlock *, gpuStream_t)> getSweepOnCellInterval(
      const shared_ptr<ReactionKernelBulk_4_single_precision_CUDA> &kernel,
      const shared_ptr<StructuredBlockStorage> &blocks,
      const CellInterval &globalCellInterval, cell_idx_t ghostLayers = 1) {
    return [kernel, blocks, globalCellInterval,
            ghostLayers](IBlock *b, gpuStream_t stream = nullptr) {
      kernel->runOnCellInterval(blocks, globalCellInterval, ghostLayers, b,
                                stream);
    };
  }
  std::function<void(IBlock *)>
  getSweepOnCellInterval(const shared_ptr<StructuredBlockStorage> &blocks,
                         const CellInterval &globalCellInterval,
                         cell_idx_t ghostLayers = 1,
                         gpuStream_t stream = nullptr) {
    return [this, blocks, globalCellInterval, ghostLayers, stream](IBlock *b) {
      this->runOnCellInterval(blocks, globalCellInterval, ghostLayers, b,
                              stream);
    };
  }
  void configure(const shared_ptr<StructuredBlockStorage> & /*blocks*/,
                 IBlock * /*block*/) {}
  inline void setOrder_0(const float value) { order_0_ = value; }
  inline void setOrder_1(const float value) { order_1_ = value; }
  inline void setOrder_2(const float value) { order_2_ = value; }
  inline void setOrder_3(const float value) { order_3_ = value; }
  inline void setRate_coefficient(const float value) {
    rate_coefficient_ = value;
  }
private:
  BlockDataID rho_0ID, rho_1ID, rho_2ID, rho_3ID;
  float order_0_, order_1_, order_2_, order_3_;
  float rate_coefficient_;
  float stoech_0_, stoech_1_, stoech_2_, stoech_3_;
};
#if (defined WALBERLA_CXX_COMPILER_IS_GNU) ||                                  \
    (defined WALBERLA_CXX_COMPILER_IS_CLANG)
#pragma GCC diagnostic pop
#endif
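Typical host-side usage of the header above follows the standard waLBerla sweep pattern. The sketch below is not part of the generated file: the helper function name, the field IDs passed in, and all reaction parameters (orders, rate coefficient, stoichiometric coefficients) are illustrative placeholders, it assumes the four single-precision GPU density fields are already registered on the block forest, and namespace qualification of the generated class is omitted as in the listing above.

#include <memory>

#include "timeloop/SweepTimeloop.h"

using namespace walberla;

// Hypothetical helper: construct the generated kernel and register it as a
// per-block sweep in an existing time loop.
void addReactionSweep(SweepTimeloop &timeloop, BlockDataID rho_0ID,
                      BlockDataID rho_1ID, BlockDataID rho_2ID,
                      BlockDataID rho_3ID) {
  using Kernel = ReactionKernelBulk_4_single_precision_CUDA;

  // Placeholder reaction parameters: first order in every species, rate
  // coefficient 0.5, species 0/1 consumed and species 2/3 produced.
  auto kernel = std::make_shared<Kernel>(
      rho_0ID, rho_1ID, rho_2ID, rho_3ID,
      /* order_0 .. order_3 */ 1.0f, 1.0f, 1.0f, 1.0f,
      /* rate_coefficient */ 0.5f,
      /* stoech_0 .. stoech_3 */ -1.0f, -1.0f, 1.0f, 1.0f);

  // getSweep() wraps run() in a std::function<void(IBlock *)> that the
  // time loop calls once per block and per time step.
  timeloop.add() << Sweep(Kernel::getSweep(kernel), "reaction kernel bulk");
}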
Public member functions of ReactionKernelBulk_4_single_precision_CUDA:

ReactionKernelBulk_4_single_precision_CUDA(BlockDataID rho_0ID_, BlockDataID rho_1ID_, BlockDataID rho_2ID_, BlockDataID rho_3ID_, float order_0, float order_1, float order_2, float order_3, float rate_coefficient, float stoech_0, float stoech_1, float stoech_2, float stoech_3)
void run(IBlock *block, gpuStream_t stream = nullptr)
void runOnCellInterval(const shared_ptr<StructuredBlockStorage> &blocks, const CellInterval &globalCellInterval, cell_idx_t ghostLayers, IBlock *block, gpuStream_t stream = nullptr)
void operator()(IBlock *block, gpuStream_t stream = nullptr)
static std::function<void(IBlock *)> getSweep(const shared_ptr<ReactionKernelBulk_4_single_precision_CUDA> &kernel)
std::function<void(IBlock *)> getSweep(gpuStream_t stream = nullptr)
static std::function<void(IBlock *, gpuStream_t)> getSweepOnCellInterval(const shared_ptr<ReactionKernelBulk_4_single_precision_CUDA> &kernel, const shared_ptr<StructuredBlockStorage> &blocks, const CellInterval &globalCellInterval, cell_idx_t ghostLayers = 1)
std::function<void(IBlock *)> getSweepOnCellInterval(const shared_ptr<StructuredBlockStorage> &blocks, const CellInterval &globalCellInterval, cell_idx_t ghostLayers = 1, gpuStream_t stream = nullptr)
void configure(const shared_ptr<StructuredBlockStorage> &, IBlock *)
float getStoech_0() const
float getStoech_1() const
float getStoech_2() const
float getStoech_3() const
float getRate_coefficient() const
void setOrder_0(const float value)
void setOrder_1(const float value)
void setOrder_2(const float value)
void setOrder_3(const float value)
void setStoech_0(const float value)
void setStoech_1(const float value)
void setStoech_2(const float value)
void setStoech_3(const float value)
void setRate_coefficient(const float value)
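The cell-interval variants restrict the kernel to a sub-region of the domain and take an explicit GPU stream. A sketch of that path, again not part of the generated file: the interval bounds, ghost-layer count and stream handling are illustrative, and plain CUDA stream calls are used because this is the CUDA flavour of the kernel (gpuStream_t maps to cudaStream_t in CUDA builds).

#include <memory>

#include <cuda_runtime.h>

#include "core/cell/CellInterval.h"
#include "domain_decomposition/StructuredBlockStorage.h"

using namespace walberla;

// Hypothetical helper: apply the kernel only inside a 32^3 sub-interval,
// on a user-managed CUDA stream.
void runReactionOnSubdomain(
    const std::shared_ptr<StructuredBlockStorage> &blocks,
    const std::shared_ptr<ReactionKernelBulk_4_single_precision_CUDA> &kernel) {
  CellInterval interval(0, 0, 0, 31, 31, 31); // placeholder global bounds

  // The static factory binds blocks, interval and ghost layers; the block
  // and the GPU stream remain call-time arguments.
  auto sweep = ReactionKernelBulk_4_single_precision_CUDA::getSweepOnCellInterval(
      kernel, blocks, interval, /* ghostLayers */ 1);

  cudaStream_t stream;
  cudaStreamCreate(&stream);
  for (auto &block : *blocks) // run on every block of the forest
    sweep(&block, stream);
  cudaStreamSynchronize(stream);
  cudaStreamDestroy(stream);
}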