ESPResSo
Extensible Simulation Package for Research on Soft Matter Systems
FrictionCouplingKernel_double_precision.cpp
//======================================================================================================================
//
// This file is part of waLBerla. waLBerla is free software: you can
// redistribute it and/or modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// waLBerla is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License along
// with waLBerla (see COPYING.txt). If not, see <http://www.gnu.org/licenses/>.
//
//! \file FrictionCouplingKernel_double_precision.cpp
//! \ingroup lbm
//! \author lbmpy
//======================================================================================================================

// kernel generated with pystencils v1.2, lbmpy v1.2, lbmpy_walberla/pystencils_walberla from waLBerla commit ref: a839fac6ef7d0c58e7710e4d50490e9dd7146b4a

#include <cmath>

#include "FrictionCouplingKernel_double_precision.h"
#include "core/DataTypes.h"
#include "core/Macros.h"

#define FUNC_PREFIX

#if (defined WALBERLA_CXX_COMPILER_IS_GNU) || (defined WALBERLA_CXX_COMPILER_IS_CLANG)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfloat-equal"
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif

#if (defined WALBERLA_CXX_COMPILER_IS_INTEL)
#pragma warning push
#pragma warning(disable : 1599)
#endif

using namespace std;

namespace walberla {
namespace pystencils {

namespace internal_828cb3fbc90c26a23ae54639862a1401 {
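// For each interior cell, the kernel below assembles the three components of
// the friction coupling force field f from signed sums of the 13 flux
// components of j, sampled at the cell and its neighbours and scaled by
// kT / (2 * D); schematically, f_c = kT / (2 * D) * sum_q s_{c,q} * j_q(x + o_q)
// with signs s_{c,q} in {-1, +1} fixed by the flux stencil.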
static FUNC_PREFIX void frictioncouplingkernel_double_precision_frictioncouplingkernel_double_precision(double D, double *RESTRICT _data_f, double *RESTRICT const _data_j, int64_t const _size_f_0, int64_t const _size_f_1, int64_t const _size_f_2, int64_t const _stride_f_0, int64_t const _stride_f_1, int64_t const _stride_f_2, int64_t const _stride_f_3, int64_t const _stride_j_0, int64_t const _stride_j_1, int64_t const _stride_j_2, int64_t const _stride_j_3, double kT) {
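  // The loops cover the interior [1, size - 2]; the surrounding layer is read
  // through the +/- stride offsets but never written. Pointer names encode
  // offsets: _data_j_2m1_36 is component 6 of j one cell below in z (suffix
  // 2m1 = "2 minus 1"), _data_j_21_35 is component 5 one cell above in z, and
  // the row-level suffixes _10 / _11 / _1m1 select y, y + 1 and y - 1.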
  for (int64_t ctr_2 = 1; ctr_2 < _size_f_2 - 1; ctr_2 += 1) {
    double *RESTRICT _data_f_20_30 = _data_f + _stride_f_2 * ctr_2;
    double *RESTRICT _data_j_2m1_36 = _data_j + _stride_j_2 * ctr_2 - _stride_j_2 + 6 * _stride_j_3;
    double *RESTRICT _data_j_2m1_310 = _data_j + _stride_j_2 * ctr_2 - _stride_j_2 + 10 * _stride_j_3;
    double *RESTRICT _data_j_2m1_312 = _data_j + _stride_j_2 * ctr_2 - _stride_j_2 + 12 * _stride_j_3;
    double *RESTRICT _data_j_20_30 = _data_j + _stride_j_2 * ctr_2;
    double *RESTRICT _data_j_20_310 = _data_j + _stride_j_2 * ctr_2 + 10 * _stride_j_3;
    double *RESTRICT _data_j_20_311 = _data_j + _stride_j_2 * ctr_2 + 11 * _stride_j_3;
    double *RESTRICT _data_j_20_312 = _data_j + _stride_j_2 * ctr_2 + 12 * _stride_j_3;
    double *RESTRICT _data_j_20_33 = _data_j + _stride_j_2 * ctr_2 + 3 * _stride_j_3;
    double *RESTRICT _data_j_20_34 = _data_j + _stride_j_2 * ctr_2 + 4 * _stride_j_3;
    double *RESTRICT _data_j_20_35 = _data_j + _stride_j_2 * ctr_2 + 5 * _stride_j_3;
    double *RESTRICT _data_j_20_36 = _data_j + _stride_j_2 * ctr_2 + 6 * _stride_j_3;
    double *RESTRICT _data_j_20_39 = _data_j + _stride_j_2 * ctr_2 + 9 * _stride_j_3;
    double *RESTRICT _data_j_21_35 = _data_j + _stride_j_2 * ctr_2 + _stride_j_2 + 5 * _stride_j_3;
    double *RESTRICT _data_j_21_39 = _data_j + _stride_j_2 * ctr_2 + _stride_j_2 + 9 * _stride_j_3;
    double *RESTRICT _data_j_21_311 = _data_j + _stride_j_2 * ctr_2 + _stride_j_2 + 11 * _stride_j_3;
    double *RESTRICT _data_f_20_31 = _data_f + _stride_f_2 * ctr_2 + _stride_f_3;
    double *RESTRICT _data_j_2m1_38 = _data_j + _stride_j_2 * ctr_2 - _stride_j_2 + 8 * _stride_j_3;
    double *RESTRICT _data_j_20_31 = _data_j + _stride_j_2 * ctr_2 + _stride_j_3;
    double *RESTRICT _data_j_20_37 = _data_j + _stride_j_2 * ctr_2 + 7 * _stride_j_3;
    double *RESTRICT _data_j_20_38 = _data_j + _stride_j_2 * ctr_2 + 8 * _stride_j_3;
    double *RESTRICT _data_j_21_37 = _data_j + _stride_j_2 * ctr_2 + _stride_j_2 + 7 * _stride_j_3;
    double *RESTRICT _data_f_20_32 = _data_f + _stride_f_2 * ctr_2 + 2 * _stride_f_3;
    double *RESTRICT _data_j_20_32 = _data_j + _stride_j_2 * ctr_2 + 2 * _stride_j_3;
    double *RESTRICT _data_j_21_32 = _data_j + _stride_j_2 * ctr_2 + _stride_j_2 + 2 * _stride_j_3;
    for (int64_t ctr_1 = 1; ctr_1 < _size_f_1 - 1; ctr_1 += 1) {
      double *RESTRICT _data_f_20_30_10 = _stride_f_1 * ctr_1 + _data_f_20_30;
      double *RESTRICT _data_j_2m1_36_10 = _stride_j_1 * ctr_1 + _data_j_2m1_36;
      double *RESTRICT _data_j_2m1_310_11 = _stride_j_1 * ctr_1 + _stride_j_1 + _data_j_2m1_310;
      double *RESTRICT _data_j_2m1_312_1m1 = _stride_j_1 * ctr_1 - _stride_j_1 + _data_j_2m1_312;
      double *RESTRICT _data_j_20_30_10 = _stride_j_1 * ctr_1 + _data_j_20_30;
      double *RESTRICT _data_j_20_310_10 = _stride_j_1 * ctr_1 + _data_j_20_310;
      double *RESTRICT _data_j_20_311_10 = _stride_j_1 * ctr_1 + _data_j_20_311;
      double *RESTRICT _data_j_20_312_10 = _stride_j_1 * ctr_1 + _data_j_20_312;
      double *RESTRICT _data_j_20_33_10 = _stride_j_1 * ctr_1 + _data_j_20_33;
      double *RESTRICT _data_j_20_34_10 = _stride_j_1 * ctr_1 + _data_j_20_34;
      double *RESTRICT _data_j_20_35_10 = _stride_j_1 * ctr_1 + _data_j_20_35;
      double *RESTRICT _data_j_20_36_10 = _stride_j_1 * ctr_1 + _data_j_20_36;
      double *RESTRICT _data_j_20_39_10 = _stride_j_1 * ctr_1 + _data_j_20_39;
      double *RESTRICT _data_j_20_33_11 = _stride_j_1 * ctr_1 + _stride_j_1 + _data_j_20_33;
      double *RESTRICT _data_j_20_34_1m1 = _stride_j_1 * ctr_1 - _stride_j_1 + _data_j_20_34;
      double *RESTRICT _data_j_21_35_10 = _stride_j_1 * ctr_1 + _data_j_21_35;
      double *RESTRICT _data_j_21_39_11 = _stride_j_1 * ctr_1 + _stride_j_1 + _data_j_21_39;
      double *RESTRICT _data_j_21_311_1m1 = _stride_j_1 * ctr_1 - _stride_j_1 + _data_j_21_311;
      double *RESTRICT _data_f_20_31_10 = _stride_f_1 * ctr_1 + _data_f_20_31;
      double *RESTRICT _data_j_2m1_38_11 = _stride_j_1 * ctr_1 + _stride_j_1 + _data_j_2m1_38;
      double *RESTRICT _data_j_20_31_10 = _stride_j_1 * ctr_1 + _data_j_20_31;
      double *RESTRICT _data_j_20_37_10 = _stride_j_1 * ctr_1 + _data_j_20_37;
      double *RESTRICT _data_j_20_38_10 = _stride_j_1 * ctr_1 + _data_j_20_38;
      double *RESTRICT _data_j_20_31_11 = _stride_j_1 * ctr_1 + _stride_j_1 + _data_j_20_31;
      double *RESTRICT _data_j_21_37_11 = _stride_j_1 * ctr_1 + _stride_j_1 + _data_j_21_37;
      double *RESTRICT _data_f_20_32_10 = _stride_f_1 * ctr_1 + _data_f_20_32;
      double *RESTRICT _data_j_20_32_10 = _stride_j_1 * ctr_1 + _data_j_20_32;
      double *RESTRICT _data_j_21_32_10 = _stride_j_1 * ctr_1 + _data_j_21_32;
      for (int64_t ctr_0 = 1; ctr_0 < _size_f_0 - 1; ctr_0 += 1) {
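        // Innermost x loop: the x component of f gathers all 18 contributing
        // flux samples with negative sign, while the y and z components mix
        // signs according to the orientation of each flux direction.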
        _data_f_20_30_10[_stride_f_0 * ctr_0] = kT * (-1.0 * _data_j_20_30_10[_stride_j_0 * ctr_0 + _stride_j_0] - 1.0 * _data_j_20_30_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_310_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_311_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_312_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_33_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_33_11[_stride_j_0 * ctr_0 + _stride_j_0] - 1.0 * _data_j_20_34_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_34_1m1[_stride_j_0 * ctr_0 + _stride_j_0] - 1.0 * _data_j_20_35_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_36_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_39_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_21_311_1m1[_stride_j_0 * ctr_0 + _stride_j_0] - 1.0 * _data_j_21_35_10[_stride_j_0 * ctr_0 + _stride_j_0] - 1.0 * _data_j_21_39_11[_stride_j_0 * ctr_0 + _stride_j_0] - 1.0 * _data_j_2m1_310_11[_stride_j_0 * ctr_0 + _stride_j_0] - 1.0 * _data_j_2m1_312_1m1[_stride_j_0 * ctr_0 + _stride_j_0] - 1.0 * _data_j_2m1_36_10[_stride_j_0 * ctr_0 + _stride_j_0]) * 0.5 * ((1.0) / (D));
        _data_f_20_31_10[_stride_f_0 * ctr_0] = kT * (-1.0 * _data_j_20_310_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_31_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_31_11[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_33_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_33_11[_stride_j_0 * ctr_0 + _stride_j_0] - 1.0 * _data_j_20_37_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_38_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_39_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_21_37_11[_stride_j_0 * ctr_0] - 1.0 * _data_j_21_39_11[_stride_j_0 * ctr_0 + _stride_j_0] - 1.0 * _data_j_2m1_310_11[_stride_j_0 * ctr_0 + _stride_j_0] - 1.0 * _data_j_2m1_38_11[_stride_j_0 * ctr_0] + _data_j_20_311_10[_stride_j_0 * ctr_0] + _data_j_20_312_10[_stride_j_0 * ctr_0] + _data_j_20_34_10[_stride_j_0 * ctr_0] + _data_j_20_34_1m1[_stride_j_0 * ctr_0 + _stride_j_0] + _data_j_21_311_1m1[_stride_j_0 * ctr_0 + _stride_j_0] + _data_j_2m1_312_1m1[_stride_j_0 * ctr_0 + _stride_j_0]) * 0.5 * ((1.0) / (D));
        _data_f_20_32_10[_stride_f_0 * ctr_0] = kT * (-1.0 * _data_j_20_311_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_32_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_35_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_37_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_20_39_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_21_311_1m1[_stride_j_0 * ctr_0 + _stride_j_0] - 1.0 * _data_j_21_32_10[_stride_j_0 * ctr_0] - 1.0 * _data_j_21_35_10[_stride_j_0 * ctr_0 + _stride_j_0] - 1.0 * _data_j_21_37_11[_stride_j_0 * ctr_0] - 1.0 * _data_j_21_39_11[_stride_j_0 * ctr_0 + _stride_j_0] + _data_j_20_310_10[_stride_j_0 * ctr_0] + _data_j_20_312_10[_stride_j_0 * ctr_0] + _data_j_20_36_10[_stride_j_0 * ctr_0] + _data_j_20_38_10[_stride_j_0 * ctr_0] + _data_j_2m1_310_11[_stride_j_0 * ctr_0 + _stride_j_0] + _data_j_2m1_312_1m1[_stride_j_0 * ctr_0 + _stride_j_0] + _data_j_2m1_36_10[_stride_j_0 * ctr_0 + _stride_j_0] + _data_j_2m1_38_11[_stride_j_0 * ctr_0]) * 0.5 * ((1.0) / (D));
      }
    }
  }
}
} // namespace internal_828cb3fbc90c26a23ae54639862a1401
115
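// Apply the kernel to an entire block: the base pointers start one ghost
// layer before the field origin (dataAt(-1, -1, -1, 0)) and the loop extents
// are the field size plus two, so the interior loop writes every inner cell.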
void FrictionCouplingKernel_double_precision::run(IBlock *block) {
  auto f = block->getData<field::GhostLayerField<double, 3>>(fID);
  auto j = block->getData<field::GhostLayerField<double, 13>>(jID);

  auto &D = this->D_;
  auto &kT = this->kT_;
  WALBERLA_ASSERT_GREATER_EQUAL(-1, -int_c(f->nrOfGhostLayers()));
  double *RESTRICT _data_f = f->dataAt(-1, -1, -1, 0);
  WALBERLA_ASSERT_GREATER_EQUAL(-1, -int_c(j->nrOfGhostLayers()));
  double *RESTRICT const _data_j = j->dataAt(-1, -1, -1, 0);
  WALBERLA_ASSERT_GREATER_EQUAL(f->xSizeWithGhostLayer(), int64_t(cell_idx_c(f->xSize()) + 2));
  const int64_t _size_f_0 = int64_t(cell_idx_c(f->xSize()) + 2);
  WALBERLA_ASSERT_GREATER_EQUAL(f->ySizeWithGhostLayer(), int64_t(cell_idx_c(f->ySize()) + 2));
  const int64_t _size_f_1 = int64_t(cell_idx_c(f->ySize()) + 2);
  WALBERLA_ASSERT_GREATER_EQUAL(f->zSizeWithGhostLayer(), int64_t(cell_idx_c(f->zSize()) + 2));
  const int64_t _size_f_2 = int64_t(cell_idx_c(f->zSize()) + 2);
  const int64_t _stride_f_0 = int64_t(f->xStride());
  const int64_t _stride_f_1 = int64_t(f->yStride());
  const int64_t _stride_f_2 = int64_t(f->zStride());
  const int64_t _stride_f_3 = int64_t(1 * int64_t(f->fStride()));
  const int64_t _stride_j_0 = int64_t(j->xStride());
  const int64_t _stride_j_1 = int64_t(j->yStride());
  const int64_t _stride_j_2 = int64_t(j->zStride());
  const int64_t _stride_j_3 = int64_t(1 * int64_t(j->fStride()));
  internal_828cb3fbc90c26a23ae54639862a1401::frictioncouplingkernel_double_precision_frictioncouplingkernel_double_precision(D, _data_f, _data_j, _size_f_0, _size_f_1, _size_f_2, _stride_f_0, _stride_f_1, _stride_f_2, _stride_f_3, _stride_j_0, _stride_j_1, _stride_j_2, _stride_j_3, kT);
}
142
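// Apply the kernel to the portion of a global cell interval that overlaps
// this block: the interval is clipped against the block's bounding box
// (expanded by ghostLayers) and transformed to block-local coordinates
// before dispatching to the generated kernel.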
void FrictionCouplingKernel_double_precision::runOnCellInterval(const shared_ptr<StructuredBlockStorage> &blocks, const CellInterval &globalCellInterval, cell_idx_t ghostLayers, IBlock *block) {
  CellInterval ci = globalCellInterval;
  CellInterval blockBB = blocks->getBlockCellBB(*block);
  blockBB.expand(ghostLayers);
  ci.intersect(blockBB);
  blocks->transformGlobalToBlockLocalCellInterval(ci, *block);
  if (ci.empty())
    return;
151
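  // From here on the body mirrors run(), except that the base pointers and
  // loop extents are derived from the clipped interval ci rather than from
  // the full field size.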
  auto f = block->getData<field::GhostLayerField<double, 3>>(fID);
  auto j = block->getData<field::GhostLayerField<double, 13>>(jID);

  auto &D = this->D_;
  auto &kT = this->kT_;
  WALBERLA_ASSERT_GREATER_EQUAL(ci.xMin() - 1, -int_c(f->nrOfGhostLayers()));
  WALBERLA_ASSERT_GREATER_EQUAL(ci.yMin() - 1, -int_c(f->nrOfGhostLayers()));
  WALBERLA_ASSERT_GREATER_EQUAL(ci.zMin() - 1, -int_c(f->nrOfGhostLayers()));
  double *RESTRICT _data_f = f->dataAt(ci.xMin() - 1, ci.yMin() - 1, ci.zMin() - 1, 0);
  WALBERLA_ASSERT_GREATER_EQUAL(ci.xMin() - 1, -int_c(j->nrOfGhostLayers()));
  WALBERLA_ASSERT_GREATER_EQUAL(ci.yMin() - 1, -int_c(j->nrOfGhostLayers()));
  WALBERLA_ASSERT_GREATER_EQUAL(ci.zMin() - 1, -int_c(j->nrOfGhostLayers()));
  double *RESTRICT const _data_j = j->dataAt(ci.xMin() - 1, ci.yMin() - 1, ci.zMin() - 1, 0);
  WALBERLA_ASSERT_GREATER_EQUAL(f->xSizeWithGhostLayer(), int64_t(cell_idx_c(ci.xSize()) + 2));
  const int64_t _size_f_0 = int64_t(cell_idx_c(ci.xSize()) + 2);
  WALBERLA_ASSERT_GREATER_EQUAL(f->ySizeWithGhostLayer(), int64_t(cell_idx_c(ci.ySize()) + 2));
  const int64_t _size_f_1 = int64_t(cell_idx_c(ci.ySize()) + 2);
  WALBERLA_ASSERT_GREATER_EQUAL(f->zSizeWithGhostLayer(), int64_t(cell_idx_c(ci.zSize()) + 2));
  const int64_t _size_f_2 = int64_t(cell_idx_c(ci.zSize()) + 2);
  const int64_t _stride_f_0 = int64_t(f->xStride());
  const int64_t _stride_f_1 = int64_t(f->yStride());
  const int64_t _stride_f_2 = int64_t(f->zStride());
  const int64_t _stride_f_3 = int64_t(1 * int64_t(f->fStride()));
  const int64_t _stride_j_0 = int64_t(j->xStride());
  const int64_t _stride_j_1 = int64_t(j->yStride());
  const int64_t _stride_j_2 = int64_t(j->zStride());
  const int64_t _stride_j_3 = int64_t(1 * int64_t(j->fStride()));
  internal_828cb3fbc90c26a23ae54639862a1401::frictioncouplingkernel_double_precision_frictioncouplingkernel_double_precision(D, _data_f, _data_j, _size_f_0, _size_f_1, _size_f_2, _stride_f_0, _stride_f_1, _stride_f_2, _stride_f_3, _stride_j_0, _stride_j_1, _stride_j_2, _stride_j_3, kT);
}

} // namespace pystencils
} // namespace walberla

#if (defined WALBERLA_CXX_COMPILER_IS_GNU) || (defined WALBERLA_CXX_COMPILER_IS_CLANG)
#pragma GCC diagnostic pop
#endif

#if (defined WALBERLA_CXX_COMPILER_IS_INTEL)
#pragma warning pop
#endif
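
A minimal usage sketch (not part of the generated file). The class and its two
entry points run() and runOnCellInterval() are taken from the listing above;
the constructor argument order is an assumption inferred from the members it
references (fID, jID, D_, kT_) and should be checked against
FrictionCouplingKernel_double_precision.h:

    // Sweep the friction coupling kernel over every block of the domain.
    walberla::pystencils::FrictionCouplingKernel_double_precision kernel(fID, jID, D, kT);
    for (auto &block : *blocks) { // blocks: shared_ptr<StructuredBlockForest>
      kernel.run(&block);
      // or, to restrict the sweep to a global cell interval ci:
      // kernel.runOnCellInterval(blocks, ci, 0, &block);
    }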