Itoyori  v0.0.1
cache_system.hpp
Go to the documentation of this file.
1 #pragma once
2 
11 
12 #include "ityr/common/util.hpp"
13 
// Select the hash-map implementation for the cache's key -> slot table.
// When ankerl's unordered_dense is available it is used (presumably for its
// better lookup/insert performance -- confirm against project benchmarks);
// otherwise std::unordered_map is the fallback. Both are exposed under the
// same `ityr::ori::unordered_map` alias so the rest of this file is agnostic
// to which implementation is in use.
#if __has_include(<ankerl/unordered_dense.h>)
#include <ankerl/unordered_dense.h>
namespace ityr::ori {
template <typename Key, typename Value>
using unordered_map = ankerl::unordered_dense::map<Key, Value>;
}
#else
#include <unordered_map>
namespace ityr::ori {
template <typename Key, typename Value>
using unordered_map = std::unordered_map<Key, Value>;
}
#endif
27 
namespace ityr::ori {

// Index of a slot in the cache's fixed-size entry table.
using cache_entry_idx_t = int;

// Thrown by cache_system::ensure_cached() when every slot is occupied and
// none of them reports is_evictable().
class cache_full_exception : public std::exception {};
33 
34 template <typename Key, typename Entry>
35 class cache_system {
36 public:
37  cache_system(cache_entry_idx_t nentries) : cache_system(nentries, Entry{}) {}
38  cache_system(cache_entry_idx_t nentries, const Entry& e)
39  : nentries_(nentries),
40  entry_initial_state_(e),
41  entries_(init_entries()),
42  lru_(init_lru()),
43  table_(init_table()) {}
44 
45  cache_entry_idx_t num_entries() const { return nentries_; }
46 
47  bool is_cached(Key key) const {
48  return table_.find(key) != table_.end();
49  }
50 
51  template <bool UpdateLRU = true>
52  Entry& ensure_cached(Key key) {
53  auto it = table_.find(key);
54  if (it == table_.end()) {
55  cache_entry_idx_t idx = get_empty_slot();
56  cache_entry& ce = entries_[idx];
57 
58  ce.entry.on_cache_map(idx);
59 
60  ce.allocated = true;
61  ce.key = key;
62  table_[key] = idx;
63  if constexpr (UpdateLRU) {
64  move_to_back_lru(ce);
65  }
66  return ce.entry;
67  } else {
68  cache_entry_idx_t idx = it->second;
69  cache_entry& ce = entries_[idx];
70  if constexpr (UpdateLRU) {
71  move_to_back_lru(ce);
72  }
73  return ce.entry;
74  }
75  }
76 
77  void ensure_evicted(Key key) {
78  auto it = table_.find(key);
79  if (it != table_.end()) {
80  cache_entry_idx_t idx = it->second;
81  cache_entry& ce = entries_[idx];
82  ITYR_CHECK(ce.entry.is_evictable());
83  ce.entry.on_evict();
84  ce.key = {};
85  ce.entry = entry_initial_state_;
86  table_.erase(key);
87  ce.allocated = false;
88  }
89  }
90 
91  template <typename Func>
92  void for_each_entry(Func&& f) {
93  for (auto& ce : entries_) {
94  if (ce.allocated) {
95  f(ce.entry);
96  }
97  }
98  }
99 
100 private:
101  struct cache_entry {
102  bool allocated;
103  Key key;
104  Entry entry;
106  typename std::list<cache_entry_idx_t>::iterator lru_it;
107 
108  cache_entry(const Entry& e) : entry(e) {}
109  };
110 
111  std::vector<cache_entry> init_entries() {
112  std::vector<cache_entry> entries;
113  for (cache_entry_idx_t idx = 0; idx < nentries_; idx++) {
114  cache_entry& ce = entries.emplace_back(entry_initial_state_);
115  ce.allocated = false;
116  ce.idx = idx;
117  }
118  return entries;
119  }
120 
121  std::list<cache_entry_idx_t> init_lru() {
122  std::list<cache_entry_idx_t> lru;
123  for (auto& ce : entries_) {
124  lru.push_back(ce.idx);
125  ce.lru_it = std::prev(lru.end());
126  ITYR_CHECK(*ce.lru_it == ce.idx);
127  }
128  return lru;
129  }
130 
131  unordered_map<Key, cache_entry_idx_t> init_table() {
132  unordered_map<Key, cache_entry_idx_t> table;
133  // To improve performance of the hash table
134  table.reserve(nentries_);
135  return table;
136  }
137 
138  void move_to_back_lru(cache_entry& ce) {
139  lru_.splice(lru_.end(), lru_, ce.lru_it);
140  ITYR_CHECK(std::prev(lru_.end()) == ce.lru_it);
141  ITYR_CHECK(*ce.lru_it == ce.idx);
142  }
143 
144  cache_entry_idx_t get_empty_slot() {
145  // FIXME: Performance issue?
146  for (const auto& idx : lru_) {
147  cache_entry& ce = entries_[idx];
148  if (!ce.allocated) {
149  return ce.idx;
150  }
151  if (ce.entry.is_evictable()) {
152  Key prev_key = ce.key;
153  table_.erase(prev_key);
154  ce.entry.on_evict();
155  ce.allocated = false;
156  return ce.idx;
157  }
158  }
159  throw cache_full_exception{};
160  }
161 
162  cache_entry_idx_t nentries_;
163  Entry entry_initial_state_;
164  std::vector<cache_entry> entries_; // index (cache_entry_idx_t) -> entry (cache_entry)
165  std::list<cache_entry_idx_t> lru_; // front (oldest) <----> back (newest)
166  unordered_map<Key, cache_entry_idx_t> table_; // hash table (Key -> cache_entry_idx_t)
167 };
168 
169 ITYR_TEST_CASE("[ityr::ori::cache_system] testing cache system") {
170  using key_t = int;
171  struct test_entry {
172  bool evictable = true;
174 
175  bool is_evictable() const { return evictable; }
176  void on_evict() {}
177  void on_cache_map(cache_entry_idx_t idx) { entry_idx = idx; }
178  };
179 
180  int nelems = 100;
181  cache_system<key_t, test_entry> cs(nelems);
182 
183  int nkey = 1000;
184  std::vector<key_t> keys;
185  for (int i = 0; i < nkey; i++) {
186  keys.push_back(i);
187  }
188 
189  ITYR_SUBCASE("basic test") {
190  for (key_t k : keys) {
191  test_entry& e = cs.ensure_cached(k);
192  ITYR_CHECK(cs.is_cached(k));
193  for (int i = 0; i < 10; i++) {
194  test_entry& e2 = cs.ensure_cached(k);
195  ITYR_CHECK(e.entry_idx == e2.entry_idx);
196  }
197  }
198  }
199 
200  ITYR_SUBCASE("all entries should be cached when the number of entries is small enough") {
201  for (int i = 0; i < nelems; i++) {
202  cs.ensure_cached(keys[i]);
203  ITYR_CHECK(cs.is_cached(keys[i]));
204  }
205  for (int i = 0; i < nelems; i++) {
206  cs.ensure_cached(keys[i]);
207  ITYR_CHECK(cs.is_cached(keys[i]));
208  for (int j = 0; j < nelems; j++) {
209  ITYR_CHECK(cs.is_cached(keys[j]));
210  }
211  }
212  }
213 
214  ITYR_SUBCASE("nonevictable entries should not be evicted") {
215  int nrem = 50;
216  for (int i = 0; i < nrem; i++) {
217  test_entry& e = cs.ensure_cached(keys[i]);
218  ITYR_CHECK(cs.is_cached(keys[i]));
219  e.evictable = false;
220  }
221  for (key_t k : keys) {
222  cs.ensure_cached(k);
223  ITYR_CHECK(cs.is_cached(k));
224  for (int j = 0; j < nrem; j++) {
225  ITYR_CHECK(cs.is_cached(keys[j]));
226  }
227  }
228  for (int i = 0; i < nrem; i++) {
229  test_entry& e = cs.ensure_cached(keys[i]);
230  ITYR_CHECK(cs.is_cached(keys[i]));
231  e.evictable = true;
232  }
233  }
234 
235  ITYR_SUBCASE("should throw exception if cache is full") {
236  for (int i = 0; i < nelems; i++) {
237  test_entry& e = cs.ensure_cached(keys[i]);
238  ITYR_CHECK(cs.is_cached(keys[i]));
239  e.evictable = false;
240  }
241  ITYR_CHECK_THROWS_AS(cs.ensure_cached(keys[nelems]), cache_full_exception);
242  for (int i = 0; i < nelems; i++) {
243  test_entry& e = cs.ensure_cached(keys[i]);
244  ITYR_CHECK(cs.is_cached(keys[i]));
245  e.evictable = true;
246  }
247  cs.ensure_cached(keys[nelems]);
248  ITYR_CHECK(cs.is_cached(keys[nelems]));
249  }
250 
251  ITYR_SUBCASE("LRU eviction") {
252  for (int i = 0; i < nkey; i++) {
253  cs.ensure_cached(keys[i]);
254  ITYR_CHECK(cs.is_cached(keys[i]));
255  for (int j = 0; j <= i - nelems; j++) {
256  ITYR_CHECK(!cs.is_cached(keys[j]));
257  }
258  for (int j = std::max(0, i - nelems + 1); j < i; j++) {
259  ITYR_CHECK(cs.is_cached(keys[j]));
260  }
261  }
262  }
263 
264  for (key_t k : keys) {
265  cs.ensure_evicted(k);
266  }
267 }
268 
269 }
Definition: cache_system.hpp:32
Definition: cache_system.hpp:35
cache_system(cache_entry_idx_t nentries, const Entry &e)
Definition: cache_system.hpp:38
void for_each_entry(Func &&f)
Definition: cache_system.hpp:92
cache_system(cache_entry_idx_t nentries)
Definition: cache_system.hpp:37
bool is_cached(Key key) const
Definition: cache_system.hpp:47
cache_entry_idx_t num_entries() const
Definition: cache_system.hpp:45
Entry & ensure_cached(Key key)
Definition: cache_system.hpp:52
void ensure_evicted(Key key)
Definition: cache_system.hpp:77
#define ITYR_CHECK_THROWS_AS(exp, exception)
Definition: util.hpp:51
#define ITYR_SUBCASE(name)
Definition: util.hpp:41
#define ITYR_CHECK(cond)
Definition: util.hpp:48
Definition: block_region_set.hpp:9
std::unordered_map< Key, Value > unordered_map
Definition: cache_system.hpp:24
int cache_entry_idx_t
Definition: cache_system.hpp:30
monoid< T, max_functor<>, lowest< T > > max
Definition: reducer.hpp:104