25#include "core/DataTypes.h"
26#include "core/logging/Logging.h"
28#include "gpu/GPUField.h"
29#include "gpu/GPUWrapper.h"
31#include "domain_decomposition/BlockDataID.h"
32#include "domain_decomposition/IBlock.h"
33#include "domain_decomposition/StructuredBlockStorage.h"
34#include "field/SwapableCompare.h"
37#include <unordered_map>
// Map RESTRICT to the compiler-specific no-alias pointer qualifier so the
// generated kernels can promise non-aliasing arguments to the optimizer.
// NOTE(review): the original #if/#elif guards were lost in extraction — the
// two bare #defines as shown would redefine the macro. Reconstructed per the
// usual waLBerla/pystencils convention; confirm against the generated file.
#if defined(__GNUC__) || defined(__clang__)
#define RESTRICT __restrict__
#elif defined(_MSC_VER)
#define RESTRICT __restrict
#else
#define RESTRICT
#endif
47#if (defined WALBERLA_CXX_COMPILER_IS_GNU) || \
48 (defined WALBERLA_CXX_COMPILER_IS_CLANG)
49#pragma GCC diagnostic push
50#pragma GCC diagnostic ignored "-Wunused-parameter"
51#pragma GCC diagnostic ignored "-Wreorder"
60 BlockDataID jID_, BlockDataID phiID_, BlockDataID rhoID_,
float D,
61 float f_ext_0,
float f_ext_1,
float f_ext_2,
float kT,
float z)
62 : jID(jID_), phiID(phiID_), rhoID(rhoID_), D_(D), f_ext_0_(f_ext_0),
63 f_ext_1_(f_ext_1), f_ext_2_(f_ext_2), kT_(kT), z_(z) {}
68 const CellInterval &globalCellInterval,
69 cell_idx_t ghostLayers, IBlock *
block,
70 gpuStream_t
stream =
nullptr);
79 return [kernel](IBlock *b) { kernel->run(b); };
85 const shared_ptr<StructuredBlockStorage> &blocks,
86 const CellInterval &globalCellInterval, cell_idx_t ghostLayers = 1) {
87 return [kernel, blocks, globalCellInterval,
88 ghostLayers](IBlock *b, gpuStream_t
stream =
nullptr) {
89 kernel->runOnCellInterval(blocks, globalCellInterval, ghostLayers, b,
98 std::function<void(IBlock *)>
100 const CellInterval &globalCellInterval,
101 cell_idx_t ghostLayers = 1,
102 gpuStream_t
stream =
nullptr) {
103 return [
this, blocks, globalCellInterval, ghostLayers,
stream](IBlock *b) {
109 void configure(
const shared_ptr<StructuredBlockStorage> & ,
112 inline float getD()
const {
return D_; }
116 inline float getKt()
const {
return kT_; }
117 inline float getZ()
const {
return z_; }
118 inline void setD(
const float value) { D_ = value; }
119 inline void setF_ext_0(
const float value) { f_ext_0_ = value; }
120 inline void setF_ext_1(
const float value) { f_ext_1_ = value; }
121 inline void setF_ext_2(
const float value) { f_ext_2_ = value; }
122 inline void setKt(
const float value) { kT_ = value; }
123 inline void setZ(
const float value) { z_ = value; }
130 inline void setPhiID(BlockDataID phiID_) { phiID = phiID_; }
145#if (defined WALBERLA_CXX_COMPILER_IS_GNU) || \
146 (defined WALBERLA_CXX_COMPILER_IS_CLANG)
147#pragma GCC diagnostic pop
void runOnCellInterval(const shared_ptr< StructuredBlockStorage > &blocks, const CellInterval &globalCellInterval, cell_idx_t ghostLayers, IBlock *block, gpuStream_t stream=nullptr)
void setPhiID(BlockDataID phiID_)
DiffusiveFluxKernelWithElectrostatic_single_precision_CUDA(BlockDataID jID_, BlockDataID phiID_, BlockDataID rhoID_, float D, float f_ext_0, float f_ext_1, float f_ext_2, float kT, float z)
void setF_ext_2(const float value)
std::function< void(IBlock *)> getSweep(gpuStream_t stream=nullptr)
void setKt(const float value)
void setZ(const float value)
void setD(const float value)
void operator()(IBlock *block, gpuStream_t stream=nullptr)
std::function< void(IBlock *)> getSweepOnCellInterval(const shared_ptr< StructuredBlockStorage > &blocks, const CellInterval &globalCellInterval, cell_idx_t ghostLayers=1, gpuStream_t stream=nullptr)
static std::function< void(IBlock *, gpuStream_t)> getSweepOnCellInterval(const shared_ptr< DiffusiveFluxKernelWithElectrostatic_single_precision_CUDA > &kernel, const shared_ptr< StructuredBlockStorage > &blocks, const CellInterval &globalCellInterval, cell_idx_t ghostLayers=1)
void setF_ext_0(const float value)
void run(IBlock *block, gpuStream_t stream=nullptr)
void setF_ext_1(const float value)
static std::function< void(IBlock *)> getSweep(const shared_ptr< DiffusiveFluxKernelWithElectrostatic_single_precision_CUDA > &kernel)
void configure(const shared_ptr< StructuredBlockStorage > &, IBlock *)
cudaStream_t stream[1]
CUDA streams, used to order and overlap asynchronous GPU work (kernel launches and memory transfers) relative to the host.
static double * block(double *p, std::size_t index, std::size_t size)
\file PackInfoPdfDoublePrecision.cpp \author pystencils