#include <cuda_runtime.h>

#if defined(OMPI_MPI_H) || defined(_MPI_H)
#error CU-file includes mpi.h! This should not happen!
#endif
  cudaDeviceProp deviceProp;
  CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, dev))
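The two lines above query the device properties inside the compute-capability check. A minimal, self-contained sketch of what cuda_check_gpu_compute_capability() plausibly does with them; the threshold values and the exact comparison are assumptions, since the listing only shows the declarations of computeCapabilityMinMajor and computeCapabilityMinMinor:

// Sketch only: threshold values are assumed, not taken from the listing.
static int constexpr computeCapabilityMinMajor = 3;
static int constexpr computeCapabilityMinMinor = 0;

bool cuda_check_gpu_compute_capability(int dev) {
  cudaDeviceProp deviceProp;
  CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, dev))
  // accept the device if its compute capability is at least the minimum
  return deviceProp.major > computeCapabilityMinMajor or
         (deviceProp.major == computeCapabilityMinMajor and
          deviceProp.minor >= computeCapabilityMinMinor);
}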
std::string cuda_get_gpu_name(int dev) {
  cudaDeviceProp deviceProp;
  CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, dev))
  return {deviceProp.name};
}
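For illustration, the name getter combines naturally with cuda_get_n_gpus() from the index below; this small helper is hypothetical and not part of the listing:

#include <cstdio>

// Hypothetical usage example: list every CUDA device on the local host.
void print_local_gpus() {
  auto const n_gpus = cuda_get_n_gpus();
  for (int dev = 0; dev < n_gpus; ++dev) {
    std::printf("GPU %d: %s\n", dev, cuda_get_gpu_name(dev).c_str());
  }
}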
EspressoGpuDevice cuda_get_device_props(const int dev) {
  cudaDeviceProp deviceProp;
  CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, dev))
  auto const [node, hostname] = detail::get_node_info();
  // ... (lines elided in this listing) ...
          deviceProp.totalGlobalMem,
          deviceProp.multiProcessorCount};
}
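The elided initializer above builds the EspressoGpuDevice returned by this function (the "struct to hold information relevant to ESPResSo about GPUs" from the index below). A plausible layout, with every field name and type being an assumption inferred from the initializer fragment:

#include <cstddef>
#include <string>

// Assumed layout, for orientation only; the real struct may differ.
struct EspressoGpuDevice {
  std::string name;          // deviceProp.name
  std::string hostname;      // from detail::get_node_info()
  int node;                  // from detail::get_node_info()
  std::size_t total_memory;  // deviceProp.totalGlobalMem
  int n_multiprocessors;     // deviceProp.multiProcessorCount
};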
  auto const deleter = [](int *p) { cudaFree(reinterpret_cast<void *>(p)); };
  // ...
  CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&ptr), sizeof(int)));
  std::unique_ptr<int, decltype(deleter)> d(ptr, deleter);
  CUDA_CHECK(cudaMemcpy(d.get(), &h, sizeof(int), cudaMemcpyHostToDevice));
  // ...
  CUDA_CHECK(cudaMemcpy(&h, d.get(), sizeof(int), cudaMemcpyDeviceToHost));
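Taken together, the fragments above implement a host-to-device-to-host round trip of a single int. A self-contained sketch of cuda_test_device_access(), in which the sentinel value, the elided declarations, and the return convention are assumptions:

#include <cuda_runtime.h>
#include <memory>

bool cuda_test_device_access() {
  int h = 25;          // arbitrary sentinel value (assumption)
  int *ptr = nullptr;

  // device allocation with RAII clean-up
  auto const deleter = [](int *p) { cudaFree(reinterpret_cast<void *>(p)); };
  CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&ptr), sizeof(int)));
  std::unique_ptr<int, decltype(deleter)> d(ptr, deleter);

  // copy the value to the device, clear it on the host, copy it back
  CUDA_CHECK(cudaMemcpy(d.get(), &h, sizeof(int), cudaMemcpyHostToDevice));
  h = 0;
  CUDA_CHECK(cudaMemcpy(&h, d.get(), sizeof(int), cudaMemcpyDeviceToHost));

  return h == 25;      // true if the round trip preserved the value
}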
  if (incompatible or communication_failure) {
    // ... (line elided in this listing)
        " is not capable of running ESPResSo.");
  }
Wrapper for CUDA runtime exceptions.
cudaStream_t stream[1]
CUDA streams for parallel computing on CPU and GPU.
EspressoGpuDevice cuda_get_device_props(const int dev)
Get properties of a CUDA device.
void cuda_check_device()
Check that a device is available, that its compute capability is sufficient for ESPResSo,...
bool cuda_check_gpu_compute_capability(int dev)
Check that a given GPU has a sufficient compute capability.
static int constexpr computeCapabilityMinMajor
bool cuda_test_device_access()
Test if communication to the CUDA device works.
std::string cuda_get_gpu_name(int dev)
Get the name of a CUDA device.
int cuda_get_device()
Get the current CUDA device.
static int constexpr computeCapabilityMinMinor
void cuda_set_device(int dev)
Choose a device for future CUDA computations.
int cuda_get_n_gpus()
Get the number of CUDA devices on the local host.
void cuda_init()
Initializes the CUDA stream.
Struct to hold information relevant to ESPResSo about GPUs.
#define CUDA_CHECK(statement)
Convert CUDA error codes into runtime errors.
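CUDA_CHECK is used throughout the listing without a trailing semicolon and, per the description above, converts CUDA error codes into runtime errors. A minimal sketch consistent with that usage; the real definition, exception type, and any file/line reporting are not shown here:

#include <cuda_runtime.h>
#include <stdexcept>
#include <string>

// Sketch only: evaluate a CUDA runtime call and throw on failure.
#define CUDA_CHECK(statement)                                                 \
  {                                                                           \
    cudaError_t const error_code = (statement);                              \
    if (error_code != cudaSuccess) {                                          \
      throw std::runtime_error(std::string("CUDA error: ") +                 \
                               cudaGetErrorString(error_code));              \
    }                                                                         \
  }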