76 auto rho_0 =
block->getData<field::GhostLayerField<double, 1>>(rho_0ID);
77 auto rho_1 =
block->getData<field::GhostLayerField<double, 1>>(rho_1ID);
78 auto rho_3 =
block->getData<field::GhostLayerField<double, 1>>(rho_3ID);
79 auto rho_4 =
block->getData<field::GhostLayerField<double, 1>>(rho_4ID);
80 auto rho_2 =
block->getData<field::GhostLayerField<double, 1>>(rho_2ID);
82 auto &stoech_2 = this->stoech_2_;
83 auto &rate_coefficient = this->rate_coefficient_;
84 auto &stoech_1 = this->stoech_1_;
85 auto &stoech_3 = this->stoech_3_;
86 auto &order_0 = this->order_0_;
87 auto &stoech_0 = this->stoech_0_;
88 auto &order_2 = this->order_2_;
89 auto &order_4 = this->order_4_;
90 auto &order_1 = this->order_1_;
91 auto &stoech_4 = this->stoech_4_;
92 auto &order_3 = this->order_3_;
93 WALBERLA_ASSERT_GREATER_EQUAL(0, -int_c(rho_0->nrOfGhostLayers()))
94 double *
RESTRICT _data_rho_0 = rho_0->dataAt(0, 0, 0, 0);
95 WALBERLA_ASSERT_GREATER_EQUAL(0, -int_c(rho_1->nrOfGhostLayers()))
96 double *
RESTRICT _data_rho_1 = rho_1->dataAt(0, 0, 0, 0);
97 WALBERLA_ASSERT_GREATER_EQUAL(0, -int_c(rho_2->nrOfGhostLayers()))
98 double *
RESTRICT _data_rho_2 = rho_2->dataAt(0, 0, 0, 0);
99 WALBERLA_ASSERT_GREATER_EQUAL(0, -int_c(rho_3->nrOfGhostLayers()))
100 double *
RESTRICT _data_rho_3 = rho_3->dataAt(0, 0, 0, 0);
101 WALBERLA_ASSERT_GREATER_EQUAL(0, -int_c(rho_4->nrOfGhostLayers()))
102 double *
RESTRICT _data_rho_4 = rho_4->dataAt(0, 0, 0, 0);
103 WALBERLA_ASSERT_GREATER_EQUAL(rho_0->xSizeWithGhostLayer(), int64_t(int64_c(rho_0->xSize()) + 0))
104 const int64_t _size_rho_0_0 = int64_t(int64_c(rho_0->xSize()) + 0);
105 WALBERLA_ASSERT_GREATER_EQUAL(rho_0->ySizeWithGhostLayer(), int64_t(int64_c(rho_0->ySize()) + 0))
106 const int64_t _size_rho_0_1 = int64_t(int64_c(rho_0->ySize()) + 0);
107 WALBERLA_ASSERT_GREATER_EQUAL(rho_0->zSizeWithGhostLayer(), int64_t(int64_c(rho_0->zSize()) + 0))
108 const int64_t _size_rho_0_2 = int64_t(int64_c(rho_0->zSize()) + 0);
109 const int64_t _stride_rho_0_0 = int64_t(rho_0->xStride());
110 const int64_t _stride_rho_0_1 = int64_t(rho_0->yStride());
111 const int64_t _stride_rho_0_2 = int64_t(rho_0->zStride());
112 const int64_t _stride_rho_1_0 = int64_t(rho_1->xStride());
113 const int64_t _stride_rho_1_1 = int64_t(rho_1->yStride());
114 const int64_t _stride_rho_1_2 = int64_t(rho_1->zStride());
115 const int64_t _stride_rho_2_0 = int64_t(rho_2->xStride());
116 const int64_t _stride_rho_2_1 = int64_t(rho_2->yStride());
117 const int64_t _stride_rho_2_2 = int64_t(rho_2->zStride());
118 const int64_t _stride_rho_3_0 = int64_t(rho_3->xStride());
119 const int64_t _stride_rho_3_1 = int64_t(rho_3->yStride());
120 const int64_t _stride_rho_3_2 = int64_t(rho_3->zStride());
121 const int64_t _stride_rho_4_0 = int64_t(rho_4->xStride());
122 const int64_t _stride_rho_4_1 = int64_t(rho_4->yStride());
123 const int64_t _stride_rho_4_2 = int64_t(rho_4->zStride());
124 internal_5119d69793e3096feaaca816d627c080::reactionkernelbulk_5_double_precision_reactionkernelbulk_5_double_precision(_data_rho_0, _data_rho_1, _data_rho_2, _data_rho_3, _data_rho_4, _size_rho_0_0, _size_rho_0_1, _size_rho_0_2, _stride_rho_0_0, _stride_rho_0_1, _stride_rho_0_2, _stride_rho_1_0, _stride_rho_1_1, _stride_rho_1_2, _stride_rho_2_0, _stride_rho_2_1, _stride_rho_2_2, _stride_rho_3_0, _stride_rho_3_1, _stride_rho_3_2, _stride_rho_4_0, _stride_rho_4_1, _stride_rho_4_2, order_0, order_1, order_2, order_3, order_4, rate_coefficient, stoech_0, stoech_1, stoech_2, stoech_3, stoech_4);
129 CellInterval ci = globalCellInterval;
130 CellInterval blockBB = blocks->getBlockCellBB(*
block);
131 blockBB.expand(ghostLayers);
132 ci.intersect(blockBB);
133 blocks->transformGlobalToBlockLocalCellInterval(ci, *
block);
137 auto rho_0 =
block->getData<field::GhostLayerField<double, 1>>(rho_0ID);
138 auto rho_1 =
block->getData<field::GhostLayerField<double, 1>>(rho_1ID);
139 auto rho_3 =
block->getData<field::GhostLayerField<double, 1>>(rho_3ID);
140 auto rho_4 =
block->getData<field::GhostLayerField<double, 1>>(rho_4ID);
141 auto rho_2 =
block->getData<field::GhostLayerField<double, 1>>(rho_2ID);
143 auto &stoech_2 = this->stoech_2_;
144 auto &rate_coefficient = this->rate_coefficient_;
145 auto &stoech_1 = this->stoech_1_;
146 auto &stoech_3 = this->stoech_3_;
147 auto &order_0 = this->order_0_;
148 auto &stoech_0 = this->stoech_0_;
149 auto &order_2 = this->order_2_;
150 auto &order_4 = this->order_4_;
151 auto &order_1 = this->order_1_;
152 auto &stoech_4 = this->stoech_4_;
153 auto &order_3 = this->order_3_;
154 WALBERLA_ASSERT_GREATER_EQUAL(ci.xMin(), -int_c(rho_0->nrOfGhostLayers()))
155 WALBERLA_ASSERT_GREATER_EQUAL(ci.yMin(), -int_c(rho_0->nrOfGhostLayers()))
156 WALBERLA_ASSERT_GREATER_EQUAL(ci.zMin(), -int_c(rho_0->nrOfGhostLayers()))
157 double *
RESTRICT _data_rho_0 = rho_0->dataAt(ci.xMin(), ci.yMin(), ci.zMin(), 0);
158 WALBERLA_ASSERT_GREATER_EQUAL(ci.xMin(), -int_c(rho_1->nrOfGhostLayers()))
159 WALBERLA_ASSERT_GREATER_EQUAL(ci.yMin(), -int_c(rho_1->nrOfGhostLayers()))
160 WALBERLA_ASSERT_GREATER_EQUAL(ci.zMin(), -int_c(rho_1->nrOfGhostLayers()))
161 double *
RESTRICT _data_rho_1 = rho_1->dataAt(ci.xMin(), ci.yMin(), ci.zMin(), 0);
162 WALBERLA_ASSERT_GREATER_EQUAL(ci.xMin(), -int_c(rho_2->nrOfGhostLayers()))
163 WALBERLA_ASSERT_GREATER_EQUAL(ci.yMin(), -int_c(rho_2->nrOfGhostLayers()))
164 WALBERLA_ASSERT_GREATER_EQUAL(ci.zMin(), -int_c(rho_2->nrOfGhostLayers()))
165 double *
RESTRICT _data_rho_2 = rho_2->dataAt(ci.xMin(), ci.yMin(), ci.zMin(), 0);
166 WALBERLA_ASSERT_GREATER_EQUAL(ci.xMin(), -int_c(rho_3->nrOfGhostLayers()))
167 WALBERLA_ASSERT_GREATER_EQUAL(ci.yMin(), -int_c(rho_3->nrOfGhostLayers()))
168 WALBERLA_ASSERT_GREATER_EQUAL(ci.zMin(), -int_c(rho_3->nrOfGhostLayers()))
169 double *
RESTRICT _data_rho_3 = rho_3->dataAt(ci.xMin(), ci.yMin(), ci.zMin(), 0);
170 WALBERLA_ASSERT_GREATER_EQUAL(ci.xMin(), -int_c(rho_4->nrOfGhostLayers()))
171 WALBERLA_ASSERT_GREATER_EQUAL(ci.yMin(), -int_c(rho_4->nrOfGhostLayers()))
172 WALBERLA_ASSERT_GREATER_EQUAL(ci.zMin(), -int_c(rho_4->nrOfGhostLayers()))
173 double *
RESTRICT _data_rho_4 = rho_4->dataAt(ci.xMin(), ci.yMin(), ci.zMin(), 0);
174 WALBERLA_ASSERT_GREATER_EQUAL(rho_0->xSizeWithGhostLayer(), int64_t(int64_c(ci.xSize()) + 0))
175 const int64_t _size_rho_0_0 = int64_t(int64_c(ci.xSize()) + 0);
176 WALBERLA_ASSERT_GREATER_EQUAL(rho_0->ySizeWithGhostLayer(), int64_t(int64_c(ci.ySize()) + 0))
177 const int64_t _size_rho_0_1 = int64_t(int64_c(ci.ySize()) + 0);
178 WALBERLA_ASSERT_GREATER_EQUAL(rho_0->zSizeWithGhostLayer(), int64_t(int64_c(ci.zSize()) + 0))
179 const int64_t _size_rho_0_2 = int64_t(int64_c(ci.zSize()) + 0);
180 const int64_t _stride_rho_0_0 = int64_t(rho_0->xStride());
181 const int64_t _stride_rho_0_1 = int64_t(rho_0->yStride());
182 const int64_t _stride_rho_0_2 = int64_t(rho_0->zStride());
183 const int64_t _stride_rho_1_0 = int64_t(rho_1->xStride());
184 const int64_t _stride_rho_1_1 = int64_t(rho_1->yStride());
185 const int64_t _stride_rho_1_2 = int64_t(rho_1->zStride());
186 const int64_t _stride_rho_2_0 = int64_t(rho_2->xStride());
187 const int64_t _stride_rho_2_1 = int64_t(rho_2->yStride());
188 const int64_t _stride_rho_2_2 = int64_t(rho_2->zStride());
189 const int64_t _stride_rho_3_0 = int64_t(rho_3->xStride());
190 const int64_t _stride_rho_3_1 = int64_t(rho_3->yStride());
191 const int64_t _stride_rho_3_2 = int64_t(rho_3->zStride());
192 const int64_t _stride_rho_4_0 = int64_t(rho_4->xStride());
193 const int64_t _stride_rho_4_1 = int64_t(rho_4->yStride());
194 const int64_t _stride_rho_4_2 = int64_t(rho_4->zStride());
195 internal_5119d69793e3096feaaca816d627c080::reactionkernelbulk_5_double_precision_reactionkernelbulk_5_double_precision(_data_rho_0, _data_rho_1, _data_rho_2, _data_rho_3, _data_rho_4, _size_rho_0_0, _size_rho_0_1, _size_rho_0_2, _stride_rho_0_0, _stride_rho_0_1, _stride_rho_0_2, _stride_rho_1_0, _stride_rho_1_1, _stride_rho_1_2, _stride_rho_2_0, _stride_rho_2_1, _stride_rho_2_2, _stride_rho_3_0, _stride_rho_3_1, _stride_rho_3_2, _stride_rho_4_0, _stride_rho_4_1, _stride_rho_4_2, order_0, order_1, order_2, order_3, order_4, rate_coefficient, stoech_0, stoech_1, stoech_2, stoech_3, stoech_4);
// Generated core kernel for the 5-species bulk reaction. Each field k is
// addressed through its raw pointer _data_rho_k with the per-axis strides
// _stride_rho_k_{0,1,2}; the shared loop extents come from _size_rho_0_*.
// Presumably evaluates the rate law parameterized by rate_coefficient,
// order_0..order_4 and stoech_0..stoech_4 per cell — body not visible in
// this chunk, so confirm against the full generated file.
static FUNC_PREFIX void reactionkernelbulk_5_double_precision_reactionkernelbulk_5_double_precision(double *RESTRICT _data_rho_0, double *RESTRICT _data_rho_1, double *RESTRICT _data_rho_2, double *RESTRICT _data_rho_3, double *RESTRICT _data_rho_4, int64_t const _size_rho_0_0, int64_t const _size_rho_0_1, int64_t const _size_rho_0_2, int64_t const _stride_rho_0_0, int64_t const _stride_rho_0_1, int64_t const _stride_rho_0_2, int64_t const _stride_rho_1_0, int64_t const _stride_rho_1_1, int64_t const _stride_rho_1_2, int64_t const _stride_rho_2_0, int64_t const _stride_rho_2_1, int64_t const _stride_rho_2_2, int64_t const _stride_rho_3_0, int64_t const _stride_rho_3_1, int64_t const _stride_rho_3_2, int64_t const _stride_rho_4_0, int64_t const _stride_rho_4_1, int64_t const _stride_rho_4_2, double order_0, double order_1, double order_2, double order_3, double order_4, double rate_coefficient, double stoech_0, double stoech_1, double stoech_2, double stoech_3, double stoech_4)