#if defined(ESPRESSO_CUDA) && defined(ESPRESSO_WALBERLA)
// ...
#include <unordered_map>

// Non-contiguous excerpt from the CudaInitHandle script interface;
// elided source lines are marked with "...".
  if (context()->is_head_node()) {
    // ...
  }

// Excerpt from the body of do_call_method(name, parameters):
  if (name == "list_devices") {
    std::unordered_map<int, std::string> devices{};
    if (context()->is_head_node()) {
      // ... (n_gpus is set in an elided line, presumably via cuda_get_n_gpus())
      for (int i = 0; i < n_gpus; ++i) {
        // ... (elided: fills devices[i], presumably via cuda_get_gpu_name(i))
      }
    }
    // ...
  }
  if (name == "list_devices_properties") {
    std::unordered_map<std::string, std::unordered_map<int, Variant>> dict{};
    // ... (elided: per-device property map entries, including)
        {"compute_capability",
         // ...
         {dev.compute_capability_major, dev.compute_capability_minor}}}},
        {"cores", dev.n_cores},
        {"total_memory", dev.total_memory},
    // ...
  }
  if (name == "get_n_gpus") {
    // ...
  }
  if (name == "is_mpi_gpu_aware") {
    return ::communication_environment->is_mpi_gpu_aware();
  }
#if defined(ESPRESSO_CUDA) && defined(ESPRESSO_WALBERLA)
  if (name == "set_device_id_per_rank") {
    // ...
  }
#endif
Referenced symbols (with their Doxygen brief descriptions):

void add_parameters(std::vector<AutoParameter> &&params)
virtual bool is_head_node() const = 0
Context *context() const - Responsible context.
std::string_view name() const
Variant do_call_method(std::string const &name, VariantMap const &parameters) override
cudaStream_t stream[1] - CUDA streams for parallel computing on CPU and GPU.
(file brief of the included MPI communication header) This file contains the asynchronous MPI communication.
void invoke_skip_cuda_exceptions(F &&f, Args &&...args) - Invoke a function and silently ignore any thrown cuda_runtime_error error.
std::vector<EspressoGpuDevice> cuda_gather_gpus() - Gather the list of CUDA devices from all nodes on the head node.
std::string cuda_get_gpu_name(int dev) - Get the name of a CUDA device.
int cuda_get_device() - Get the current CUDA device.
void cuda_set_device(int dev) - Choose a device for future CUDA computations.
int cuda_get_n_gpus() - Get the number of CUDA devices on the local host.
void set_device_id_per_rank()
using VariantMap = std::unordered_map<std::string, Variant>
auto make_unordered_map_of_variants(std::unordered_map<K, V> const &v) - Recursive variant implementation.
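invoke_skip_cuda_exceptions() above is documented as invoking a callable and silently ignoring any thrown cuda_runtime_error, presumably so that device queries can degrade gracefully on hosts without a usable GPU. A minimal self-contained sketch of such a wrapper follows; the cuda_runtime_error type defined here is a local placeholder, not ESPResSo's actual exception class.

// Sketch of a "call and swallow one specific exception type" wrapper,
// matching the documented behaviour of invoke_skip_cuda_exceptions().
// cuda_runtime_error is a placeholder type defined here for illustration.
#include <iostream>
#include <stdexcept>
#include <utility>

struct cuda_runtime_error : public std::runtime_error {
  using std::runtime_error::runtime_error;
};

template <class F, class... Args>
void invoke_skip_cuda_exceptions(F &&f, Args &&...args) {
  try {
    std::forward<F>(f)(std::forward<Args>(args)...);
  } catch (cuda_runtime_error const &) {
    // Silently ignore CUDA runtime failures, e.g. when no device is present.
  }
}

int main() {
  invoke_skip_cuda_exceptions([]() {
    throw cuda_runtime_error("no CUDA device available");
  });
  std::cout << "execution continues despite the CUDA error\n";
}

Swallowing only this one exception type keeps genuine programming errors visible while treating the absence of a CUDA device as a non-fatal condition.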