template <block_size_t BlockSize>
class home_manager {
public:
  home_manager(std::size_t mmap_entry_limit)
    : mmap_entry_limit_(mmap_entry_limit),
      cs_(mmap_entry_limit_, mmap_entry(this)),
      home_tlb_({nullptr, 0}, nullptr) {}
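
  // checkout_fast is the TLB fast path of a checkout: it succeeds only when a
  // cached home segment already covers [addr, addr + size), optionally bumping
  // the segment's reference count; on a TLB miss it returns false so the
  // caller can fall back to the segment-level path (checkout_seg).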
  template <bool IncrementRef>
  bool checkout_fast(std::byte* addr, std::size_t size) {
    // ...
    if constexpr (!enable_vm_map) {
      // ...
    }
    // ...
    auto me_p = home_tlb_.get([&](const std::pair<std::byte*, std::size_t>& seg) {
      return seg.first <= addr && addr + size <= seg.first + seg.second;
    });
    if (!me_p) return false;

    if constexpr (IncrementRef) {
      mmap_entry& me = *me_p;
      ITYR_CHECK((me_p == &mmap_entry_dummy_ || me.ref_count >= 0));
      // ...
    }

    hprof_.record(size, true);
    // ...
  }
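
  // checkout_seg resolves the mmap_entry for a whole home segment through the
  // LRU cache (get_entry) and publishes it to the TLB. When the segment is not
  // currently mapped at seg_addr (no mmap hit), the physical-memory offset is
  // recorded and the entry is queued in home_segments_to_map_; in that case the
  // function returns false, signalling that checkout_complete() still has to
  // perform the deferred mapping.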
  template <bool IncrementRef>
  bool checkout_seg(std::byte* seg_addr, std::size_t seg_size,
                    std::byte* req_addr, std::size_t req_size,
                    const common::physical_mem& pm, std::size_t pm_offset,
                    bool mapped_always) {
    // ...
    if constexpr (!enable_vm_map) {
      // ...
    }
    // ...
      home_tlb_.add({seg_addr, seg_size}, &mmap_entry_dummy_);
      hprof_.record(seg_addr, seg_size, req_addr, req_size, true);
    // ...
    mmap_entry& me = get_entry(seg_addr);
    // ...
    bool checkout_completed = true;
    // ...
    bool mmap_hit = seg_addr == me.mapped_addr;
    // ...
      me.pm_offset = pm_offset;
      home_segments_to_map_.push_back(&me);
      checkout_completed = false;
    // ...
    if constexpr (IncrementRef) {
      // ...
    }
    // ...
    home_tlb_.add({seg_addr, seg_size}, &me);
    // ...
    hprof_.record(seg_addr, seg_size, req_addr, req_size, mmap_hit);
    // ...
    return checkout_completed;
  }
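
  // The checkin paths mirror the checkout paths: checkin_fast releases a
  // reference through the TLB and returns false on a miss, while checkin_seg
  // looks the entry up without updating the LRU order (get_entry<false>) and
  // returns early for segments that are always mapped (mapped_always).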
  template <bool DecrementRef>
  bool checkin_fast(const std::byte* addr, std::size_t size) {
    // ...
    if constexpr (!enable_vm_map) {
      // ...
    }
    // ...
    auto me_p = home_tlb_.get([&](const std::pair<std::byte*, std::size_t>& seg) {
      return seg.first <= addr && addr + size <= seg.first + seg.second;
    });
    if (!me_p) return false;

    if constexpr (DecrementRef) {
      mmap_entry& me = *me_p;
      // ...
      ITYR_CHECK((me_p == &mmap_entry_dummy_ || me.ref_count >= 0));
      // ...
    }
    // ...
  }
  template <bool DecrementRef>
  void checkin_seg(std::byte* seg_addr, bool mapped_always) {
    if constexpr (!enable_vm_map) {
      // ...
    }
    // ...
    if (mapped_always) return;

    if constexpr (DecrementRef) {
      mmap_entry& me = get_entry<false>(seg_addr);
      // ...
    }
  }
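
  // checkout_complete drains home_segments_to_map_: each entry queued by
  // checkout_seg is processed here (presumably by remapping it via
  // update_mapping) and the queue is cleared afterwards.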
  void checkout_complete() {
    if constexpr (!enable_vm_map) {
      // ...
    }
    // ...
    if (!home_segments_to_map_.empty()) {
      for (mmap_entry* me : home_segments_to_map_) {
        // ...
      }
      home_segments_to_map_.clear();
    }
  }
  void ensure_evicted(void* addr) {
    if constexpr (!enable_vm_map) {
      // ...
    }
    // ...
  }

  // ...

  void on_checkout_noncoll(std::size_t size) {
    // ...
    hprof_.record(size, true);
  }

  // ...
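
  // Each home segment is tracked by an mmap_entry. Entries live in a
  // cache_system keyed by the block-aligned segment address; an entry is
  // evictable only while its ref_count is zero, and a member function shown
  // only partially here clears the owning home_manager's TLB via the `outer`
  // back-pointer, keeping the TLB consistent with the cache.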
  using cache_key_t = uintptr_t;

  struct mmap_entry {
    // ...
    std::byte*  addr        = nullptr;
    std::byte*  mapped_addr = nullptr;
    std::size_t size        = 0;
    std::size_t mapped_size = 0;
    // ...
    std::size_t pm_offset   = 0;
    // ... (ref_count, the physical_mem pointer pm, and the back-pointer
    //      `outer` are declared in elided lines)
    explicit mmap_entry(home_manager* outer_p) : outer(outer_p) {}
    // ...
    bool is_evictable() const {
      return ref_count == 0;
    }
    // ... (inside an elided member function)
      outer->home_tlb_.clear();
    // ...
  };
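
  // get_entry translates a block-aligned segment address into its cached
  // mmap_entry. If no entry can be evicted (every ref_count is non-zero),
  // ensure_cached throws cache_full_exception and the process aborts, which is
  // how mmap_entry_limit_ bounds the amount of concurrently checked-out memory.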
  template <bool UpdateLRU = true>
  mmap_entry& get_entry(void* addr) {
    try {
      return cs_.template ensure_cached<UpdateLRU>(cache_key(addr));
    } catch (cache_full_exception& e) {
      common::die("home segments are exhausted (too much checked-out memory)");
    }
  }
  void update_mapping(mmap_entry& me) {
    // ...
    if (me.mapped_addr) {
      common::verbose<3>("Unmap home segment [%p, %p) (size=%ld)",
                         me.mapped_addr, me.mapped_addr + me.mapped_size, me.mapped_size);
      // ...
    }
    // ...
    common::verbose<3>("Map home segment [%p, %p) (size=%ld)",
                       me.addr, me.addr + me.size, me.size);
    me.pm->map_to_vm(me.addr, me.size, me.pm_offset);
    me.mapped_addr = me.addr;
    me.mapped_size = me.size;
  }
  cache_key_t cache_key(void* addr) const {
    ITYR_CHECK(reinterpret_cast<uintptr_t>(addr) % BlockSize == 0);
    return reinterpret_cast<uintptr_t>(addr) / BlockSize;
  }
  using home_tlb = tlb<std::pair<std::byte*, std::size_t>, mmap_entry*, /* ... */>;
  // ...
  std::size_t                           mmap_entry_limit_;
  cache_system<cache_key_t, mmap_entry> cs_;
  mmap_entry                            mmap_entry_dummy_ = mmap_entry{nullptr};
  // ...
  std::vector<mmap_entry*>              home_segments_to_map_;
  // ...
};
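
Taken together, the listing outlines a two-level checkout protocol: a TLB fast
path (checkout_fast / checkin_fast) backed by a segment-level slow path
(checkout_seg / checkin_seg), plus a deferred mapping step (checkout_complete).
The sketch below is a hypothetical caller, not part of home_manager.hpp: it
relies only on the member signatures shown above, while checkout_example, its
arguments, and the ityr::ori / ityr::common namespace qualifications are
assumptions made for illustration; the real call sites are expected to live
inside the ori layer rather than in user code.

#include <cstddef>  // std::byte, std::size_t; the ityr headers are assumed

template <ityr::ori::block_size_t BlockSize>
void checkout_example(ityr::ori::home_manager<BlockSize>& hm,
                      std::byte* seg_addr, std::size_t seg_size,
                      std::byte* addr, std::size_t size,
                      const ityr::common::physical_mem& pm,
                      std::size_t pm_offset) {
  // 1. Fast path: succeeds only if the TLB already caches a covering segment.
  if (!hm.template checkout_fast<true>(addr, size)) {
    // 2. Slow path: resolve the whole home segment through the mmap-entry cache.
    bool completed = hm.template checkout_seg<true>(
        seg_addr, seg_size, addr, size, pm, pm_offset, /*mapped_always=*/false);
    // 3. Apply any home-segment mappings deferred by checkout_seg.
    if (!completed) {
      hm.checkout_complete();
    }
  }

  // ... access [addr, addr + size) ...

  // 4. Release the reference, mirroring the checkout path.
  if (!hm.template checkin_fast<true>(addr, size)) {
    hm.template checkin_seg<true>(seg_addr, /*mapped_always=*/false);
  }
}

Keeping the fast path separate means the common case, a TLB hit on an
already-mapped home segment, involves no LRU bookkeeping and no mmap traffic.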