#include <cuda_runtime.h>

#if defined(OMPI_MPI_H) || defined(_MPI_H)
#error CU-file includes mpi.h! This should not happen!
#endif
auto const [node, hostname] = detail::get_node_info();
auto const deleter = [](int *p) { cudaFree(reinterpret_cast<void *>(p)); };
std::unique_ptr<int, decltype(deleter)> d(ptr, deleter);
111 " is not capable of running ESPResSo.");
Wrapper for CUDA runtime exceptions.
cudaStream_t stream[1]
CUDA streams for parallel computing on CPU and GPU.
static const int computeCapabilityMinMajor
EspressoGpuDevice cuda_get_device_props(const int dev)
Get properties of a CUDA device.
void cuda_check_device()
Check that a device is available, that its compute capability is sufficient for ESPResSo, and that communication to it works.
bool cuda_check_gpu_compute_capability(int dev)
Check that a given GPU meets the minimum compute capability required by ESPResSo.
bool cuda_test_device_access()
Test if communication to the CUDA device works.
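A minimal sketch of how such a compute-capability check can be expressed with cudaGetDeviceProperties. The threshold constants mirror the computeCapabilityMinMajor/computeCapabilityMinMinor names from this listing, but the values and the function name below are illustrative, not ESPResSo's actual minimum or implementation:

#include <cuda_runtime.h>

// Illustrative thresholds; ESPResSo's actual minimum may differ.
static const int computeCapabilityMinMajor = 3;
static const int computeCapabilityMinMinor = 0;

/* Hedged sketch: return true if device `dev` meets the minimum compute
 * capability, comparing the major version first and the minor version
 * only on a tie. */
bool check_gpu_compute_capability_sketch(int dev) {
  cudaDeviceProp prop;
  if (cudaGetDeviceProperties(&prop, dev) != cudaSuccess)
    return false;
  if (prop.major > computeCapabilityMinMajor)
    return true;
  return prop.major == computeCapabilityMinMajor &&
         prop.minor >= computeCapabilityMinMinor;
}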
static const int computeCapabilityMinMinor
std::string cuda_get_gpu_name(int dev)
Get the name of a CUDA device.
int cuda_get_device()
Get the current CUDA device.
void cuda_set_device(int dev)
Choose a device for future CUDA computations.
int cuda_get_n_gpus()
Get the number of CUDA devices on the local host.
void cuda_init()
Initialize the CUDA stream.
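Taken together, device enumeration, selection, and stream initialization map onto a handful of CUDA runtime calls. A self-contained sketch under that reading, reusing a single-element stream array as in the stream[1] declaration above (the function name and error handling are illustrative):

#include <cuda_runtime.h>
#include <stdexcept>

cudaStream_t stream[1]; // one stream, mirroring the stream[1] declaration above

/* Hedged sketch: count devices, select the first one, and create the stream
 * used for asynchronous kernel launches and memory copies. */
void init_first_device_sketch() {
  int n_gpus = 0;
  if (cudaGetDeviceCount(&n_gpus) != cudaSuccess || n_gpus == 0)
    throw std::runtime_error("No CUDA device available.");
  if (cudaSetDevice(0) != cudaSuccess)
    throw std::runtime_error("Failed to select CUDA device 0.");
  if (cudaStreamCreate(&stream[0]) != cudaSuccess)
    throw std::runtime_error("Failed to create CUDA stream.");
}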
struct EspressoGpuDevice
Struct to hold information relevant to ESPResSo about GPUs.
#define CUDA_CHECK(statement)
Convert CUDA error codes into runtime errors.
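A minimal sketch of such an error-translation macro; the name CUDA_CHECK_SKETCH and the use of std::runtime_error are assumptions rather than ESPResSo's exact implementation:

#include <cuda_runtime.h>
#include <stdexcept>
#include <string>

/* Hedged sketch: evaluate a CUDA runtime call and throw a std::runtime_error
 * carrying the CUDA error string if the call did not return cudaSuccess. */
#define CUDA_CHECK_SKETCH(statement)                                           \
  do {                                                                         \
    cudaError_t const error_code = (statement);                                \
    if (error_code != cudaSuccess) {                                           \
      throw std::runtime_error(std::string("CUDA error: ") +                   \
                               cudaGetErrorString(error_code));                \
    }                                                                          \
  } while (0)

A call site would then read, for example, CUDA_CHECK_SKETCH(cudaSetDevice(dev));, turning any non-cudaSuccess return code into an exception instead of a silently ignored error.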