#include "core/DataTypes.h"
#include "core/logging/Logging.h"

#include "gpu/GPUField.h"
#include "gpu/GPUWrapper.h"

#include "domain_decomposition/BlockDataID.h"
#include "domain_decomposition/IBlock.h"
#include "domain_decomposition/StructuredBlockStorage.h"
#include "field/SwapableCompare.h"

#include <unordered_map>
// Compiler-specific spelling of the restrict qualifier used by the generated kernels.
#ifdef __GNUC__
#define RESTRICT __restrict__
#elif _MSC_VER
#define RESTRICT __restrict
#else
#define RESTRICT
#endif
#if (defined WALBERLA_CXX_COMPILER_IS_GNU) ||                                  \
    (defined WALBERLA_CXX_COMPILER_IS_CLANG)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wreorder"
#endif
namespace walberla {
namespace pystencils {

class StreamSweepDoublePrecisionCUDA {
public:
  StreamSweepDoublePrecisionCUDA(BlockDataID forceID_, BlockDataID pdfsID_,
                                 BlockDataID velocityID_)
      : forceID(forceID_), pdfsID(pdfsID_), velocityID(velocityID_) {}
  ~StreamSweepDoublePrecisionCUDA() {
    // Release the per-block GPU fields that were cached by run().
    for (auto p : cache_pdfs_) {
      delete p.second;
    }
  }
  void run(IBlock *block, gpuStream_t stream = nullptr);

  void runOnCellInterval(const shared_ptr<StructuredBlockStorage> &blocks,
                         const CellInterval &globalCellInterval,
                         cell_idx_t ghostLayers, IBlock *block,
                         gpuStream_t stream = nullptr);

  void operator()(IBlock *block, gpuStream_t stream = nullptr) {
    run(block, stream);
  }
  static std::function<void(IBlock *)>
  getSweep(const shared_ptr<StreamSweepDoublePrecisionCUDA> &kernel) {
    return [kernel](IBlock *b) { kernel->run(b); };
  }
  static std::function<void(IBlock *, gpuStream_t)> getSweepOnCellInterval(
      const shared_ptr<StreamSweepDoublePrecisionCUDA> &kernel,
      const shared_ptr<StructuredBlockStorage> &blocks,
      const CellInterval &globalCellInterval, cell_idx_t ghostLayers = 1) {
    return [kernel, blocks, globalCellInterval,
            ghostLayers](IBlock *b, gpuStream_t stream = nullptr) {
      kernel->runOnCellInterval(blocks, globalCellInterval, ghostLayers, b,
                                stream);
    };
  }
  std::function<void(IBlock *)> getSweep(gpuStream_t stream = nullptr) {
    return [this, stream](IBlock *b) { this->run(b, stream); };
  }

  std::function<void(IBlock *)>
  getSweepOnCellInterval(const shared_ptr<StructuredBlockStorage> &blocks,
                         const CellInterval &globalCellInterval,
                         cell_idx_t ghostLayers = 1,
                         gpuStream_t stream = nullptr) {
    return [this, blocks, globalCellInterval, ghostLayers, stream](IBlock *b) {
      this->runOnCellInterval(blocks, globalCellInterval, ghostLayers, b,
                              stream);
    };
  }
  void configure(const shared_ptr<StructuredBlockStorage> &, IBlock *) {}

  BlockDataID forceID;
  BlockDataID pdfsID;
  BlockDataID velocityID;
  std::unordered_map<IBlock *, gpu::GPUField<double> *> cache_pdfs_;
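  // Note: the cache above presumably keeps one temporary GPU PDF field per
  // block, so a scratch destination field for the streaming step is allocated
  // only once and reused across calls; the destructor releases these entries.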
};

} // namespace pystencils
} // namespace walberla

#if (defined WALBERLA_CXX_COMPILER_IS_GNU) ||                                  \
    (defined WALBERLA_CXX_COMPILER_IS_CLANG)
#pragma GCC diagnostic pop
#endif
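// Usage sketch (illustrative, not part of the generated interface): the sweep
// is a callable object, so it can be invoked directly on every local block, or
// wrapped through getSweep()/getSweepOnCellInterval() and registered with a
// time loop. The helper name `runStreamSweepOnce` is hypothetical and only
// relies on the interface declared above.
inline void runStreamSweepOnce(walberla::StructuredBlockStorage &blocks,
                               walberla::BlockDataID forceID,
                               walberla::BlockDataID pdfsID,
                               walberla::BlockDataID velocityID) {
  walberla::pystencils::StreamSweepDoublePrecisionCUDA sweep(forceID, pdfsID,
                                                             velocityID);
  for (auto &block : blocks) // iterate the process-local blocks
    sweep(&block);           // operator() forwards to run() on the default stream
}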