ESPResSo
Extensible Simulation Package for Research on Soft Matter Systems
FixedFlux_double_precision.cpp
//======================================================================================================================
//
// This file is part of waLBerla. waLBerla is free software: you can
// redistribute it and/or modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// waLBerla is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License along
// with waLBerla (see COPYING.txt). If not, see <http://www.gnu.org/licenses/>.
//
//! \file FixedFlux_double_precision.cpp
//! \author pystencils
//======================================================================================================================

// kernel generated with pystencils v1.3.3, lbmpy v1.3.3, lbmpy_walberla/pystencils_walberla from waLBerla commit b0842e1a493ce19ef1bbb8d2cf382fc343970a7f

#include "FixedFlux_double_precision.h"

#include "core/DataTypes.h"
#include "core/Macros.h"

#define FUNC_PREFIX

using namespace std;

namespace walberla {
namespace pystencils {

#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wconversion"
#endif

#ifdef __CUDACC__
#pragma push
#ifdef __NVCC_DIAG_PRAGMA_SUPPORT__
#pragma nv_diag_suppress 177
#else
#pragma diag_suppress 177
#endif
#endif
// NOLINTBEGIN(readability-non-const-parameter*)
namespace internal_bdec58bfeb737088cd218660bd069d85 {
static FUNC_PREFIX void fixedflux_double_precision_boundary_FixedFlux_double_precision(double *RESTRICT const _data_flux, uint8_t *RESTRICT const _data_indexVector, int64_t const _stride_flux_0, int64_t const _stride_flux_1, int64_t const _stride_flux_2, int64_t const _stride_flux_3, int32_t indexVectorSize) {
  for (int64_t ctr_0 = 0; ctr_0 < indexVectorSize; ctr_0 += 1) {
    // each index-vector entry is 40 bytes: the cell coordinates x, y, z and the
    // lattice direction as int32_t at byte offsets 0, 4, 8, 12, followed by three
    // double payload values (the prescribed flux components) at offsets 16, 24, 32
    const int32_t x = *((int32_t *)(&_data_indexVector[40 * ctr_0]));
    const int32_t y = *((int32_t *)(&_data_indexVector[40 * ctr_0 + 4]));
    const int32_t z = *((int32_t *)(&_data_indexVector[40 * ctr_0 + 8]));

    // direction vectors and inverse directions of the 27-velocity (D3Q27) stencil
    const int32_t cx[] = {0, 0, 0, -1, 1, 0, 0, -1, 1, -1, 1, 0, 0, -1, 1, 0, 0, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1};
    const int32_t cy[] = {0, 1, -1, 0, 0, 0, 0, 1, 1, -1, -1, 1, -1, 0, 0, 1, -1, 0, 0, 1, 1, -1, -1, 1, 1, -1, -1};
    const int32_t cz[] = {0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1};
    const int32_t invdir[] = {0, 2, 1, 4, 3, 6, 5, 10, 9, 8, 7, 16, 15, 18, 17, 12, 11, 14, 13, 26, 25, 24, 23, 22, 21, 20, 19};

    const int32_t dir = *((int32_t *)(&_data_indexVector[40 * ctr_0 + 12]));
    if (dir == 26) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_2 * z + 9 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) - 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24])) - 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 25) {
      _data_flux[_stride_flux_0 * x + _stride_flux_0 + _stride_flux_1 * y - _stride_flux_1 + _stride_flux_2 * z - _stride_flux_2 + 12 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) + 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24])) + 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 24) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_2 * z + 11 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) - 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32])) + 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24]));
    } else if (dir == 23) {
      _data_flux[_stride_flux_0 * x + _stride_flux_0 + _stride_flux_1 * y + _stride_flux_1 + _stride_flux_2 * z - _stride_flux_2 + 10 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) - 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24])) + 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 22) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_2 * z + 10 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) - 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24])) + 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 21) {
      _data_flux[_stride_flux_0 * x + _stride_flux_0 + _stride_flux_1 * y - _stride_flux_1 + _stride_flux_2 * z + _stride_flux_2 + 11 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) - 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32])) + 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24]));
    } else if (dir == 20) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_2 * z + 12 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) + 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24])) + 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 19) {
      _data_flux[_stride_flux_0 * x + _stride_flux_0 + _stride_flux_1 * y + _stride_flux_1 + _stride_flux_2 * z + _stride_flux_2 + 9 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) - 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24])) - 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 18) {
      _data_flux[_stride_flux_0 * x + _stride_flux_0 + _stride_flux_1 * y + _stride_flux_2 * z - _stride_flux_2 + 6 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) + 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 17) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_2 * z + 5 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) - 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 16) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_2 * z + 7 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24])) - 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 15) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_1 + _stride_flux_2 * z - _stride_flux_2 + 8 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24])) + 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 14) {
      _data_flux[_stride_flux_0 * x + _stride_flux_0 + _stride_flux_1 * y + _stride_flux_2 * z + _stride_flux_2 + 5 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) - 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 13) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_2 * z + 6 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) + 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 12) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_2 * z + 8 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24])) + 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 11) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_1 + _stride_flux_2 * z + _stride_flux_2 + 7 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24])) - 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 10) {
      _data_flux[_stride_flux_0 * x + _stride_flux_0 + _stride_flux_1 * y - _stride_flux_1 + _stride_flux_2 * z + 4 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) + 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24]));
    } else if (dir == 9) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_2 * z + 3 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) - 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24]));
    } else if (dir == 8) {
      _data_flux[_stride_flux_0 * x + _stride_flux_0 + _stride_flux_1 * y + _stride_flux_1 + _stride_flux_2 * z + 3 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) - 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24]));
    } else if (dir == 7) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_2 * z + 4 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16])) + 0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24]));
    } else if (dir == 6) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_2 * z + 2 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 5) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_2 * z + _stride_flux_2 + 2 * _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 32]));
    } else if (dir == 4) {
      _data_flux[_stride_flux_0 * x + _stride_flux_0 + _stride_flux_1 * y + _stride_flux_2 * z] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16]));
    } else if (dir == 3) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_2 * z] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 16]));
    } else if (dir == 2) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_2 * z + _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24]));
    } else if (dir == 1) {
      _data_flux[_stride_flux_0 * x + _stride_flux_1 * y + _stride_flux_1 + _stride_flux_2 * z + _stride_flux_3] = -0.1111111111111111 * *((double *)(&_data_indexVector[40 * ctr_0 + 24]));
    }
  }
}
} // namespace internal_bdec58bfeb737088cd218660bd069d85
// NOLINTEND(readability-non-const-parameter*)
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif

#ifdef __CUDACC__
#pragma pop
#endif

void FixedFlux_double_precision::run_impl(IBlock *block, IndexVectors::Type type) {
  auto *indexVectors = block->getData<IndexVectors>(indexVectorID);
  int32_t indexVectorSize = int32_c(indexVectors->indexVector(type).size());
  if (indexVectorSize == 0)
    return;

  auto pointer = indexVectors->pointerCpu(type);

  uint8_t *_data_indexVector = reinterpret_cast<uint8_t *>(pointer);

  auto flux = block->getData<field::GhostLayerField<double, 13>>(fluxID);

  WALBERLA_ASSERT_GREATER_EQUAL(0, -int_c(flux->nrOfGhostLayers()))
  double *RESTRICT const _data_flux = flux->dataAt(0, 0, 0, 0);
  const int64_t _stride_flux_0 = int64_t(flux->xStride());
  const int64_t _stride_flux_1 = int64_t(flux->yStride());
  const int64_t _stride_flux_2 = int64_t(flux->zStride());
  const int64_t _stride_flux_3 = int64_t(1 * int64_t(flux->fStride()));
  internal_bdec58bfeb737088cd218660bd069d85::fixedflux_double_precision_boundary_FixedFlux_double_precision(_data_flux, _data_indexVector, _stride_flux_0, _stride_flux_1, _stride_flux_2, _stride_flux_3, indexVectorSize);
}

void FixedFlux_double_precision::run(IBlock *block) {
  run_impl(block, IndexVectors::ALL);
}

} // namespace pystencils
} // namespace walberla
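The kernel above decodes each boundary link from a packed byte buffer using a 40-byte stride and fixed offsets. As a minimal sketch for orientation only, assuming the layout implied by those offsets, one entry could be mirrored by the struct below; the struct and field names are hypothetical, and the authoritative definition is the index-vector type declared in the corresponding generated header.

// Hypothetical mirror of one 40-byte index-vector entry, inferred from the
// byte offsets read in the kernel above (0, 4, 8, 12 as int32_t; 16, 24, 32 as double).
#include <cstdint>

struct FluxBoundaryEntry {
  int32_t x;     // offset  0: boundary cell coordinate
  int32_t y;     // offset  4
  int32_t z;     // offset  8
  int32_t dir;   // offset 12: lattice direction index handled by the branches above
  double flux_0; // offset 16: first prescribed flux component
  double flux_1; // offset 24: second prescribed flux component
  double flux_2; // offset 32: third prescribed flux component
};
static_assert(sizeof(FluxBoundaryEntry) == 40, "matches the 40-byte stride used in the kernel");

Each branch of the kernel scales these three payload values by ±1/9 (written as 0.1111111111111111 in the generated code) and writes the result into the component and neighbour cell of the 13-component flux field selected by the lattice direction.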