ESPResSo
Extensible Simulation Package for Research on Soft Matter Systems
DiffusiveFluxKernel_double_precision.cpp
//======================================================================================================================
//
// This file is part of waLBerla. waLBerla is free software: you can
// redistribute it and/or modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// waLBerla is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License along
// with waLBerla (see COPYING.txt). If not, see <http://www.gnu.org/licenses/>.
//
//! \file DiffusiveFluxKernel_double_precision.cpp
//! \author pystencils
//======================================================================================================================

// kernel generated with pystencils v1.3.3, lbmpy v1.3.3, lbmpy_walberla/pystencils_walberla from waLBerla commit b0842e1a493ce19ef1bbb8d2cf382fc343970a7f
#include <cmath>

#include "DiffusiveFluxKernel_double_precision.h"
#include "core/DataTypes.h"
#include "core/Macros.h"

#define FUNC_PREFIX

#if (defined WALBERLA_CXX_COMPILER_IS_GNU) || (defined WALBERLA_CXX_COMPILER_IS_CLANG)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfloat-equal"
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif

#if (defined WALBERLA_CXX_COMPILER_IS_INTEL)
#pragma warning push
#pragma warning(disable : 1599)
#endif

using namespace std;

namespace walberla {
namespace pystencils {
namespace internal_e5e04d1215f19faa51f3c55db6d456a2 {
static FUNC_PREFIX void diffusivefluxkernel_double_precision_diffusivefluxkernel_double_precision(double D, double *RESTRICT const _data_j, double *RESTRICT const _data_rho, int64_t const _size_j_0, int64_t const _size_j_1, int64_t const _size_j_2, int64_t const _stride_j_0, int64_t const _stride_j_1, int64_t const _stride_j_2, int64_t const _stride_j_3, int64_t const _stride_rho_0, int64_t const _stride_rho_1, int64_t const _stride_rho_2)
{
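  // The generated sweep covers the block interior plus one ghost layer in
  // each direction. pystencils peels the boundary layers of the iteration
  // space: the first block below handles the bottom z-layer (ctr_2 = 0),
  // the ctr_2 loop handles the interior z-layers, and the final block
  // handles the top z-layer; the same peeling is applied to ctr_1 and ctr_0
  // within each layer. The guarded branches update the flux components
  // stored in _data_j (13 per cell, one per unique neighbor link) only
  // where the required neighbor cells exist. The very long generated
  // update statements are omitted from this listing, which is why the
  // branch bodies appear empty.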
  {
    {
      if (0 < _size_j_1 - 1 && 0 < _size_j_2 - 1) {
      }
      for (int64_t ctr_0 = 2; ctr_0 < _size_j_0 - 1; ctr_0 += 1) {
        if (0 < _size_j_1 - 1 && 0 < _size_j_2 - 1) {
        }
      }
      if (0 < _size_j_1 - 1 && 0 < _size_j_2 - 1) {
      }
    }
    for (int64_t ctr_1 = 1; ctr_1 < _size_j_1 - 1; ctr_1 += 1) {
      {
        {
          if (ctr_1 > 0 && 0 < _size_j_2 - 1 && ctr_1 < _size_j_1 - 1) {
          }
          if (ctr_1 > 0 && 0 < _size_j_2 - 1 && 1 < _size_j_0 - 1) {
          }
          if (ctr_1 > 0 && 0 < _size_j_2 - 1) {
          }
          if (0 < _size_j_2 - 1 && ctr_1 < _size_j_1 - 1) {
          }
        }
        for (int64_t ctr_0 = 2; ctr_0 < _size_j_0 - 1; ctr_0 += 1) {
          if (ctr_1 > 0 && 0 < _size_j_2 - 1 && ctr_1 < _size_j_1 - 1) {
          }
          if (ctr_1 > 0 && 0 < _size_j_2 - 1 && ctr_0 < _size_j_0 - 1) {
          }
          if (ctr_1 > 0 && 0 < _size_j_2 - 1) {
          }
          if (0 < _size_j_2 - 1 && ctr_1 < _size_j_1 - 1) {
          }
        }
        {
          if (ctr_1 > 0 && 0 < _size_j_2 - 1 && ctr_1 < _size_j_1 - 1) {
          }
          if (ctr_1 > 0 && 0 < _size_j_2 - 1) {
          }
          if (0 < _size_j_2 - 1 && ctr_1 < _size_j_1 - 1) {
          }
        }
      }
    }
    {
      {
        if (_size_j_1 - 1 > 0 && 0 < _size_j_2 - 1 && 1 < _size_j_0 - 1) {
        }
        if (_size_j_1 - 1 > 0 && 0 < _size_j_2 - 1) {
        }
      }
      for (int64_t ctr_0 = 2; ctr_0 < _size_j_0 - 1; ctr_0 += 1) {
        if (_size_j_1 - 1 > 0 && 0 < _size_j_2 - 1 && ctr_0 < _size_j_0 - 1) {
        }
        if (_size_j_1 - 1 > 0 && 0 < _size_j_2 - 1) {
        }
      }
      if (_size_j_1 - 1 > 0 && 0 < _size_j_2 - 1) {
      }
    }
  }
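  // Interior z-layers (ctr_2 = 1 .. _size_j_2 - 2), again with peeled
  // y- and x-boundary cases inside each layer.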
  for (int64_t ctr_2 = 1; ctr_2 < _size_j_2 - 1; ctr_2 += 1) {
    {
      {
        {
          if (ctr_2 > 0 && 0 < _size_j_1 - 1 && ctr_2 < _size_j_2 - 1) {
          }
          if (ctr_2 > 0 && 0 < _size_j_1 - 1) {
          }
          if (0 < _size_j_1 - 1 && ctr_2 < _size_j_2 - 1) {
          }
        }
        for (int64_t ctr_0 = 2; ctr_0 < _size_j_0 - 1; ctr_0 += 1) {
          if (ctr_2 > 0 && 0 < _size_j_1 - 1 && ctr_2 < _size_j_2 - 1) {
          }
          if (ctr_2 > 0 && 0 < _size_j_1 - 1) {
          }
          if (0 < _size_j_1 - 1 && ctr_2 < _size_j_2 - 1) {
          }
        }
        {
          if (ctr_2 > 0 && 0 < _size_j_1 - 1 && ctr_2 < _size_j_2 - 1) {
          }
          if (ctr_2 > 0 && 0 < _size_j_1 - 1) {
          }
          if (0 < _size_j_1 - 1 && ctr_2 < _size_j_2 - 1) {
          }
        }
      }
      for (int64_t ctr_1 = 1; ctr_1 < _size_j_1 - 1; ctr_1 += 1) {
        {
          {
            if (ctr_1 > 0 && ctr_2 > 0 && 1 < _size_j_0 - 1 && ctr_2 < _size_j_2 - 1) {
            }
            if (ctr_1 > 0 && ctr_2 > 0 && 1 < _size_j_0 - 1 && ctr_1 < _size_j_1 - 1) {
            }
            if (ctr_1 > 0 && ctr_2 > 0 && 1 < _size_j_0 - 1) {
            }
            if (ctr_1 > 0 && 1 < _size_j_0 - 1 && ctr_2 < _size_j_2 - 1) {
            }
          }
          for (int64_t ctr_0 = 2; ctr_0 < _size_j_0 - 1; ctr_0 += 1) {
          }
        }
      }
      {
        {
          if (ctr_2 > 0 && _size_j_1 - 1 > 0 && 1 < _size_j_0 - 1 && ctr_2 < _size_j_2 - 1) {
          }
          if (ctr_2 > 0 && _size_j_1 - 1 > 0 && ctr_2 < _size_j_2 - 1) {
          }
          if (ctr_2 > 0 && _size_j_1 - 1 > 0 && 1 < _size_j_0 - 1) {
          }
          if (_size_j_1 - 1 > 0 && 1 < _size_j_0 - 1 && ctr_2 < _size_j_2 - 1) {
          }
          if (ctr_2 > 0 && _size_j_1 - 1 > 0) {
          }
          if (_size_j_1 - 1 > 0 && ctr_2 < _size_j_2 - 1) {
          }
        }
        for (int64_t ctr_0 = 2; ctr_0 < _size_j_0 - 1; ctr_0 += 1) {
          if (ctr_2 > 0 && _size_j_1 - 1 > 0 && ctr_0 < _size_j_0 - 1 && ctr_2 < _size_j_2 - 1) {
          }
          if (ctr_2 > 0 && _size_j_1 - 1 > 0 && ctr_2 < _size_j_2 - 1) {
          }
          if (ctr_2 > 0 && _size_j_1 - 1 > 0 && ctr_0 < _size_j_0 - 1) {
          }
          if (_size_j_1 - 1 > 0 && ctr_0 < _size_j_0 - 1 && ctr_2 < _size_j_2 - 1) {
          }
          if (ctr_2 > 0 && _size_j_1 - 1 > 0) {
          }
          if (_size_j_1 - 1 > 0 && ctr_2 < _size_j_2 - 1) {
          }
        }
        {
          if (ctr_2 > 0 && _size_j_1 - 1 > 0 && ctr_2 < _size_j_2 - 1) {
          }
          if (ctr_2 > 0 && _size_j_1 - 1 > 0) {
          }
          if (_size_j_1 - 1 > 0 && ctr_2 < _size_j_2 - 1) {
          }
        }
      }
    }
  }
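  // Top z-layer (ctr_2 = _size_j_2 - 1), mirroring the ctr_2 = 0 case above.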
  {
    {
      if (_size_j_2 - 1 > 0 && 0 < _size_j_1 - 1) {
      }
      for (int64_t ctr_0 = 2; ctr_0 < _size_j_0 - 1; ctr_0 += 1) {
        if (_size_j_2 - 1 > 0 && 0 < _size_j_1 - 1) {
        }
      }
      if (_size_j_2 - 1 > 0 && 0 < _size_j_1 - 1) {
      }
    }
    for (int64_t ctr_1 = 1; ctr_1 < _size_j_1 - 1; ctr_1 += 1) {
      {
        {
          if (ctr_1 > 0 && _size_j_2 - 1 > 0 && 1 < _size_j_0 - 1 && ctr_1 < _size_j_1 - 1) {
          }
          if (ctr_1 > 0 && _size_j_2 - 1 > 0 && ctr_1 < _size_j_1 - 1) {
          }
          if (ctr_1 > 0 && _size_j_2 - 1 > 0 && 1 < _size_j_0 - 1) {
          }
          if (ctr_1 > 0 && _size_j_2 - 1 > 0) {
          }
          if (_size_j_2 - 1 > 0 && ctr_1 < _size_j_1 - 1) {
          }
        }
        for (int64_t ctr_0 = 2; ctr_0 < _size_j_0 - 1; ctr_0 += 1) {
          if (ctr_1 > 0 && _size_j_2 - 1 > 0 && ctr_0 < _size_j_0 - 1 && ctr_1 < _size_j_1 - 1) {
          }
          if (ctr_1 > 0 && _size_j_2 - 1 > 0 && ctr_1 < _size_j_1 - 1) {
          }
          if (ctr_1 > 0 && _size_j_2 - 1 > 0 && ctr_0 < _size_j_0 - 1) {
          }
          if (ctr_1 > 0 && _size_j_2 - 1 > 0) {
          }
          if (_size_j_2 - 1 > 0 && ctr_1 < _size_j_1 - 1) {
          }
        }
        {
          if (ctr_1 > 0 && _size_j_2 - 1 > 0 && ctr_1 < _size_j_1 - 1) {
          }
          if (ctr_1 > 0 && _size_j_2 - 1 > 0) {
          }
          if (_size_j_2 - 1 > 0 && ctr_1 < _size_j_1 - 1) {
          }
        }
      }
    }
    {
      {
        if (_size_j_1 - 1 > 0 && _size_j_2 - 1 > 0 && 1 < _size_j_0 - 1) {
        }
        if (_size_j_1 - 1 > 0 && _size_j_2 - 1 > 0) {
        }
      }
      for (int64_t ctr_0 = 2; ctr_0 < _size_j_0 - 1; ctr_0 += 1) {
        if (_size_j_1 - 1 > 0 && _size_j_2 - 1 > 0 && ctr_0 < _size_j_0 - 1) {
        }
        if (_size_j_1 - 1 > 0 && _size_j_2 - 1 > 0) {
        }
      }
      if (_size_j_1 - 1 > 0 && _size_j_2 - 1 > 0) {
      }
    }
  }
  }
}
} // namespace internal_e5e04d1215f19faa51f3c55db6d456a2

void DiffusiveFluxKernel_double_precision::run(IBlock *block) {
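  // run() processes the whole block including one ghost layer on each side:
  // the data pointers are shifted to cell (-1, -1, -1) and the iteration
  // sizes are the interior sizes plus two.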
  auto j = block->getData<field::GhostLayerField<double, 13>>(jID);
  auto rho = block->getData<field::GhostLayerField<double, 1>>(rhoID);

  auto &D = this->D_;
  WALBERLA_ASSERT_GREATER_EQUAL(-1, -int_c(j->nrOfGhostLayers()))
  double *RESTRICT const _data_j = j->dataAt(-1, -1, -1, 0);
  WALBERLA_ASSERT_GREATER_EQUAL(-1, -int_c(rho->nrOfGhostLayers()))
  double *RESTRICT const _data_rho = rho->dataAt(-1, -1, -1, 0);
  WALBERLA_ASSERT_GREATER_EQUAL(j->xSizeWithGhostLayer(), int64_t(int64_c(j->xSize()) + 2))
  const int64_t _size_j_0 = int64_t(int64_c(j->xSize()) + 2);
  WALBERLA_ASSERT_GREATER_EQUAL(j->ySizeWithGhostLayer(), int64_t(int64_c(j->ySize()) + 2))
  const int64_t _size_j_1 = int64_t(int64_c(j->ySize()) + 2);
  WALBERLA_ASSERT_GREATER_EQUAL(j->zSizeWithGhostLayer(), int64_t(int64_c(j->zSize()) + 2))
  const int64_t _size_j_2 = int64_t(int64_c(j->zSize()) + 2);
  const int64_t _stride_j_0 = int64_t(j->xStride());
  const int64_t _stride_j_1 = int64_t(j->yStride());
  const int64_t _stride_j_2 = int64_t(j->zStride());
  const int64_t _stride_j_3 = int64_t(1 * int64_t(j->fStride()));
  const int64_t _stride_rho_0 = int64_t(rho->xStride());
  const int64_t _stride_rho_1 = int64_t(rho->yStride());
  const int64_t _stride_rho_2 = int64_t(rho->zStride());
  // pass the field pointers, sizes, and strides to the generated kernel
  internal_e5e04d1215f19faa51f3c55db6d456a2::diffusivefluxkernel_double_precision_diffusivefluxkernel_double_precision(D, _data_j, _data_rho, _size_j_0, _size_j_1, _size_j_2, _stride_j_0, _stride_j_1, _stride_j_2, _stride_j_3, _stride_rho_0, _stride_rho_1, _stride_rho_2);
}

void DiffusiveFluxKernel_double_precision::runOnCellInterval(const shared_ptr<StructuredBlockStorage> &blocks, const CellInterval &globalCellInterval, cell_idx_t ghostLayers, IBlock *block) {
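  // Clip the requested global cell interval against this block's bounding
  // box (extended by the requested ghost layers), transform it to
  // block-local coordinates, and skip the block if nothing remains.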
  CellInterval ci = globalCellInterval;
  CellInterval blockBB = blocks->getBlockCellBB(*block);
  blockBB.expand(ghostLayers);
  ci.intersect(blockBB);
  blocks->transformGlobalToBlockLocalCellInterval(ci, *block);
  if (ci.empty())
    return;

  auto j = block->getData<field::GhostLayerField<double, 13>>(jID);
  auto rho = block->getData<field::GhostLayerField<double, 1>>(rhoID);

  auto &D = this->D_;
  WALBERLA_ASSERT_GREATER_EQUAL(ci.xMin() - 1, -int_c(j->nrOfGhostLayers()))
  WALBERLA_ASSERT_GREATER_EQUAL(ci.yMin() - 1, -int_c(j->nrOfGhostLayers()))
  WALBERLA_ASSERT_GREATER_EQUAL(ci.zMin() - 1, -int_c(j->nrOfGhostLayers()))
  double *RESTRICT const _data_j = j->dataAt(ci.xMin() - 1, ci.yMin() - 1, ci.zMin() - 1, 0);
  WALBERLA_ASSERT_GREATER_EQUAL(ci.xMin() - 1, -int_c(rho->nrOfGhostLayers()))
  WALBERLA_ASSERT_GREATER_EQUAL(ci.yMin() - 1, -int_c(rho->nrOfGhostLayers()))
  WALBERLA_ASSERT_GREATER_EQUAL(ci.zMin() - 1, -int_c(rho->nrOfGhostLayers()))
  double *RESTRICT const _data_rho = rho->dataAt(ci.xMin() - 1, ci.yMin() - 1, ci.zMin() - 1, 0);
  WALBERLA_ASSERT_GREATER_EQUAL(j->xSizeWithGhostLayer(), int64_t(int64_c(ci.xSize()) + 2))
  const int64_t _size_j_0 = int64_t(int64_c(ci.xSize()) + 2);
  WALBERLA_ASSERT_GREATER_EQUAL(j->ySizeWithGhostLayer(), int64_t(int64_c(ci.ySize()) + 2))
  const int64_t _size_j_1 = int64_t(int64_c(ci.ySize()) + 2);
  WALBERLA_ASSERT_GREATER_EQUAL(j->zSizeWithGhostLayer(), int64_t(int64_c(ci.zSize()) + 2))
  const int64_t _size_j_2 = int64_t(int64_c(ci.zSize()) + 2);
  const int64_t _stride_j_0 = int64_t(j->xStride());
  const int64_t _stride_j_1 = int64_t(j->yStride());
  const int64_t _stride_j_2 = int64_t(j->zStride());
  const int64_t _stride_j_3 = int64_t(1 * int64_t(j->fStride()));
  const int64_t _stride_rho_0 = int64_t(rho->xStride());
  const int64_t _stride_rho_1 = int64_t(rho->yStride());
  const int64_t _stride_rho_2 = int64_t(rho->zStride());
  // pass the field pointers, sizes, and strides to the generated kernel
  internal_e5e04d1215f19faa51f3c55db6d456a2::diffusivefluxkernel_double_precision_diffusivefluxkernel_double_precision(D, _data_j, _data_rho, _size_j_0, _size_j_1, _size_j_2, _stride_j_0, _stride_j_1, _stride_j_2, _stride_j_3, _stride_rho_0, _stride_rho_1, _stride_rho_2);
}

} // namespace pystencils
} // namespace walberla

#if (defined WALBERLA_CXX_COMPILER_IS_GNU) || (defined WALBERLA_CXX_COMPILER_IS_CLANG)
#pragma GCC diagnostic pop
#endif

#if (defined WALBERLA_CXX_COMPILER_IS_INTEL)
#pragma warning pop
#endif
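For context, a minimal usage sketch (an illustration only, not part of the generated file): it assumes `blocks` is the application's block storage, that `jID` and `rhoID` identify the `GhostLayerField<double, 13>` flux field and the `GhostLayerField<double, 1>` density field registered with it, and that the constructor declared in DiffusiveFluxKernel_double_precision.h takes the two field IDs and the diffusion coefficient D, in that order.

// Hypothetical setup: blocks, jID, rhoID and the constructor argument order
// are assumptions about the surrounding application, not taken from this file.
walberla::pystencils::DiffusiveFluxKernel_double_precision fluxKernel(jID, rhoID, /*D=*/0.05);
for (auto &block : *blocks) // execute the sweep on every process-local block
  fluxKernel.run(&block);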