auto j = block->getData<field::GhostLayerField<float, 13>>(jID);
auto rho = block->getData<field::GhostLayerField<float, 1>>(rhoID);
auto phi = block->getData<field::GhostLayerField<float, 1>>(phiID);
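// Bind the scalar kernel parameters stored on this sweep object.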
auto &f_ext_1 = this->f_ext_1_;
auto &f_ext_2 = this->f_ext_2_;
auto &f_ext_0 = this->f_ext_0_;
auto &kT = this->kT_;
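// Each field needs at least one ghost layer; the raw pointers are anchored at
// cell (-1, -1, -1) so the kernel can read into the first ghost layer.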
WALBERLA_ASSERT_GREATER_EQUAL(-1, -int_c(j->nrOfGhostLayers()))
float *RESTRICT const _data_j = j->dataAt(-1, -1, -1, 0);
WALBERLA_ASSERT_GREATER_EQUAL(-1, -int_c(phi->nrOfGhostLayers()))
float *RESTRICT const _data_phi = phi->dataAt(-1, -1, -1, 0);
WALBERLA_ASSERT_GREATER_EQUAL(-1, -int_c(rho->nrOfGhostLayers()))
float *RESTRICT const _data_rho = rho->dataAt(-1, -1, -1, 0);
WALBERLA_ASSERT_GREATER_EQUAL(j->xSizeWithGhostLayer(), int64_t(int64_c(j->xSize()) + 2))
const int64_t _size_j_0 = int64_t(int64_c(j->xSize()) + 2);
WALBERLA_ASSERT_GREATER_EQUAL(j->ySizeWithGhostLayer(), int64_t(int64_c(j->ySize()) + 2))
const int64_t _size_j_1 = int64_t(int64_c(j->ySize()) + 2);
WALBERLA_ASSERT_GREATER_EQUAL(j->zSizeWithGhostLayer(), int64_t(int64_c(j->zSize()) + 2))
const int64_t _size_j_2 = int64_t(int64_c(j->zSize()) + 2);
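// Strides (in elements) along x, y, z and the f index, used for linear
// addressing inside the generated kernel.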
const int64_t _stride_j_0 = int64_t(j->xStride());
const int64_t _stride_j_1 = int64_t(j->yStride());
const int64_t _stride_j_2 = int64_t(j->zStride());
const int64_t _stride_j_3 = int64_t(1 * int64_t(j->fStride()));
const int64_t _stride_phi_0 = int64_t(phi->xStride());
const int64_t _stride_phi_1 = int64_t(phi->yStride());
const int64_t _stride_phi_2 = int64_t(phi->zStride());
const int64_t _stride_rho_0 = int64_t(rho->xStride());
const int64_t _stride_rho_1 = int64_t(rho->yStride());
const int64_t _stride_rho_2 = int64_t(rho->zStride());
internal_823ab2463d465630661d5edc8f90930c::diffusivefluxkernelwithelectrostatic_single_precision_diffusivefluxkernelwithelectrostatic_single_precision(D, _data_j, _data_phi, _data_rho, _size_j_0, _size_j_1, _size_j_2, _stride_j_0, _stride_j_1, _stride_j_2, _stride_j_3, _stride_phi_0, _stride_phi_1, _stride_phi_2, _stride_rho_0, _stride_rho_1, _stride_rho_2, f_ext_0, f_ext_1, f_ext_2, kT, z);
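// Variant restricted to a cell interval: clamp the global interval to this
// block's bounding box (extended by its ghost layers) and convert it to
// block-local coordinates before running the kernel on the sub-region.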
CellInterval ci = globalCellInterval;
CellInterval blockBB = blocks->getBlockCellBB(*block);
blockBB.expand(ghostLayers);
ci.intersect(blockBB);
blocks->transformGlobalToBlockLocalCellInterval(ci, *block);
auto j = block->getData<field::GhostLayerField<float, 13>>(jID);
auto rho = block->getData<field::GhostLayerField<float, 1>>(rhoID);
auto phi = block->getData<field::GhostLayerField<float, 1>>(phiID);
auto &f_ext_1 = this->f_ext_1_;
auto &f_ext_2 = this->f_ext_2_;
auto &f_ext_0 = this->f_ext_0_;
auto &kT = this->kT_;
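// Anchor the raw pointers one cell before the interval minimum so the kernel
// can access the ghost layer surrounding the interval.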
WALBERLA_ASSERT_GREATER_EQUAL(ci.xMin() - 1, -int_c(j->nrOfGhostLayers()))
WALBERLA_ASSERT_GREATER_EQUAL(ci.yMin() - 1, -int_c(j->nrOfGhostLayers()))
WALBERLA_ASSERT_GREATER_EQUAL(ci.zMin() - 1, -int_c(j->nrOfGhostLayers()))
float *RESTRICT const _data_j = j->dataAt(ci.xMin() - 1, ci.yMin() - 1, ci.zMin() - 1, 0);
WALBERLA_ASSERT_GREATER_EQUAL(ci.xMin() - 1, -int_c(phi->nrOfGhostLayers()))
WALBERLA_ASSERT_GREATER_EQUAL(ci.yMin() - 1, -int_c(phi->nrOfGhostLayers()))
WALBERLA_ASSERT_GREATER_EQUAL(ci.zMin() - 1, -int_c(phi->nrOfGhostLayers()))
float *RESTRICT const _data_phi = phi->dataAt(ci.xMin() - 1, ci.yMin() - 1, ci.zMin() - 1, 0);
WALBERLA_ASSERT_GREATER_EQUAL(ci.xMin() - 1, -int_c(rho->nrOfGhostLayers()))
WALBERLA_ASSERT_GREATER_EQUAL(ci.yMin() - 1, -int_c(rho->nrOfGhostLayers()))
WALBERLA_ASSERT_GREATER_EQUAL(ci.zMin() - 1, -int_c(rho->nrOfGhostLayers()))
float *RESTRICT const _data_rho = rho->dataAt(ci.xMin() - 1, ci.yMin() - 1, ci.zMin() - 1, 0);
WALBERLA_ASSERT_GREATER_EQUAL(j->xSizeWithGhostLayer(), int64_t(int64_c(ci.xSize()) + 2))
const int64_t _size_j_0 = int64_t(int64_c(ci.xSize()) + 2);
WALBERLA_ASSERT_GREATER_EQUAL(j->ySizeWithGhostLayer(), int64_t(int64_c(ci.ySize()) + 2))
const int64_t _size_j_1 = int64_t(int64_c(ci.ySize()) + 2);
WALBERLA_ASSERT_GREATER_EQUAL(j->zSizeWithGhostLayer(), int64_t(int64_c(ci.zSize()) + 2))
const int64_t _size_j_2 = int64_t(int64_c(ci.zSize()) + 2);
const int64_t _stride_j_0 = int64_t(j->xStride());
const int64_t _stride_j_1 = int64_t(j->yStride());
const int64_t _stride_j_2 = int64_t(j->zStride());
const int64_t _stride_j_3 = int64_t(1 * int64_t(j->fStride()));
const int64_t _stride_phi_0 = int64_t(phi->xStride());
const int64_t _stride_phi_1 = int64_t(phi->yStride());
const int64_t _stride_phi_2 = int64_t(phi->zStride());
const int64_t _stride_rho_0 = int64_t(rho->xStride());
const int64_t _stride_rho_1 = int64_t(rho->yStride());
const int64_t _stride_rho_2 = int64_t(rho->zStride());
internal_823ab2463d465630661d5edc8f90930c::diffusivefluxkernelwithelectrostatic_single_precision_diffusivefluxkernelwithelectrostatic_single_precision(D, _data_j, _data_phi, _data_rho, _size_j_0, _size_j_1, _size_j_2, _stride_j_0, _stride_j_1, _stride_j_2, _stride_j_3, _stride_phi_0, _stride_phi_1, _stride_phi_2, _stride_rho_0, _stride_rho_1, _stride_rho_2, f_ext_0, f_ext_1, f_ext_2, kT, z);
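// Low-level compute kernel emitted by the code generator: it receives the raw
// field pointers for j, phi and rho, the iteration-space sizes and strides
// prepared by the wrappers above, and the scalar parameters D, f_ext_0..2,
// kT and z.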
static FUNC_PREFIX void diffusivefluxkernelwithelectrostatic_single_precision_diffusivefluxkernelwithelectrostatic_single_precision(float D, float *RESTRICT const _data_j, float *RESTRICT const _data_phi, float *RESTRICT const _data_rho, int64_t const _size_j_0, int64_t const _size_j_1, int64_t const _size_j_2, int64_t const _stride_j_0, int64_t const _stride_j_1, int64_t const _stride_j_2, int64_t const _stride_j_3, int64_t const _stride_phi_0, int64_t const _stride_phi_1, int64_t const _stride_phi_2, int64_t const _stride_rho_0, int64_t const _stride_rho_1, int64_t const _stride_rho_2, float f_ext_0, float f_ext_1, float f_ext_2, float kT, float z)