35 #ifndef LIBPMEMOBJ_EBR_HPP
36 #define LIBPMEMOBJ_EBR_HPP
43 #include <unordered_map>
72 using atomic = std::atomic<size_t>;
73 using reference = std::reference_wrapper<atomic>;
88 worker(
const worker &w) =
delete;
89 worker(worker &&w) =
default;
92 worker &operator=(worker &w) =
delete;
93 worker &operator=(worker &&w) =
default;
99 worker(
ebr *e_, reference ref);
101 reference local_epoch;
108 static const size_t ACTIVE_FLAG =
static_cast<size_t>(1)
109 << (
sizeof(
size_t) * 8 - 1);
110 static const size_t EPOCHS_NUMBER = 3;
114 std::unordered_map<std::thread::id, atomic> workers;
123 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
124 VALGRIND_HG_DISABLE_CHECKING(&global_epoch,
sizeof(global_epoch));
142 std::lock_guard<std::mutex> lock(mtx);
143 auto res = workers.emplace(std::this_thread::get_id(), 0);
145 throw std::runtime_error(
146 "There can be only one worker per thread");
149 return worker{
this, reference{res.first->second}};
167 auto current_epoch = global_epoch.load();
169 std::lock_guard<std::mutex> lock(mtx);
170 for (
auto &w : workers) {
171 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_BEFORE(
172 std::memory_order_seq_cst, &w.second);
173 auto local_e = w.second.load();
174 bool active = local_e & ACTIVE_FLAG;
175 if (active && (local_e != (current_epoch | ACTIVE_FLAG))) {
180 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_BEFORE(std::memory_order_seq_cst,
182 global_epoch.store((current_epoch + 1) % EPOCHS_NUMBER);
197 size_t syncs_cnt = 0;
199 if (
sync() && ++syncs_cnt == EPOCHS_NUMBER) {
216 auto res = global_epoch.load();
217 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
233 auto res = (global_epoch.load() + 1) % EPOCHS_NUMBER;
234 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
239 ebr::worker::worker(
ebr *e_, reference ref) : local_epoch(ref), e(e_)
241 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
242 VALGRIND_HG_DISABLE_CHECKING(&ref.get(),
sizeof(ref.get()));
250 ebr::worker::~worker()
252 std::lock_guard<std::mutex> lock(e->mtx);
253 e->workers.erase(std::this_thread::get_id());
264 template <
typename F>
266 ebr::worker::critical(F &&f)
268 auto new_epoch = e->global_epoch.load() | ACTIVE_FLAG;
269 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
272 local_epoch.get().store(new_epoch);
273 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
278 local_epoch.get().store(0);
Epoch-based reclamation (EBR).
Definition: ebr.hpp:71
bool sync()
Attempts to synchronise and announce a new epoch.
Definition: ebr.hpp:165
worker register_worker()
Registers and returns a new worker, which can perform critical operations (accessing some shared data which can be removed in other threads).
Definition: ebr.hpp:140
size_t staging_epoch()
Returns the epoch where objects can be staged for reclamation.
Definition: ebr.hpp:214
ebr()
Default and only ebr constructor.
Definition: ebr.hpp:121
void full_sync()
Perform full synchronisation ensuring that all objects which are no longer globally visible (and potentially subject for reclamation) may be safely freed after this call returns.
Definition: ebr.hpp:195
size_t gc_epoch()
Returns the epoch available for reclamation, i.e. the one in which objects staged two epochs earlier can be safely freed.
Definition: ebr.hpp:231
Commonly used functionality.
Persistent memory namespace.
Definition: allocation_flag.hpp:15