26#include <unordered_map>
29template <
typename Key,
typename Value>
class Cache {
30 using map_type = std::unordered_map<Key, typename std::add_const_t<Value>>;
39 m_max_size = m_cache.max_size();
50 std::minstd_rand m_rand;
60 void drop_random_element() {
70 std::uniform_int_distribution<size_type>{0,
bucket_count - 1}(m_rand);
72 while (0 == m_cache.bucket_size(
bucket)) {
78 auto const elem_index = std::uniform_int_distribution<size_type>{
79 0, m_cache.bucket_size(
bucket) - 1}(m_rand);
97 bool has(
Key const &k)
const {
return m_cache.find(k) != m_cache.end(); }
117 if ((m_cache.size() >= m_max_size) && !
has(k)) {
118 drop_random_element();
126 typename map_type::const_iterator
it;
127 std::tie(
it, std::ignore) = m_cache.emplace(k, std::forward<ValueRef>(v));
129 return &(
it->second);
147 template <
typename KeyInputIterator,
typename ValueInputIterator>
157 drop_random_element();
174 auto const needle = m_cache.find(k);
176 if (m_cache.end() !=
needle) {
bool has(Key const &k) const
Query whether k is contained in the cache.
void invalidate()
Clear the cache.
typename map_type::size_type size_type
size_type size() const
Number of elements currently cached.
Cache(size_type max_size)
KeyInputIterator put(KeyInputIterator kbegin, KeyInputIterator kend, ValueInputIterator vbegin)
Put a range of values into the cache.
Value const * put(Key const &k, ValueRef &&v)
Put a value into the cache.
Value const * get(Key const &k) const
Get a value.
size_type max_size() const
Maximal size of the cache.
cudaStream_t stream[1]
CUDA streams used to enqueue asynchronous GPU work, allowing it to overlap with computation on the CPU.