35 #ifndef LIBPMEMOBJ_EBR_HPP
36 #define LIBPMEMOBJ_EBR_HPP
43 #include <unordered_map>
/**
 * Epoch-based reclamation (EBR). Maintains a global epoch counter and a
 * per-worker local epoch so that data retired by one thread is reclaimed
 * only after every concurrently active worker has observed a newer epoch.
 */
class ebr {
	using atomic = std::atomic<size_t>;
	using reference = std::reference_wrapper<atomic>;

public:
	class worker;

	ebr();

	worker register_worker();
	bool sync();
	void full_sync();
	size_t staging_epoch();
	size_t gc_epoch();

	/**
	 * Per-thread handle used to enter critical sections. Movable but
	 * not copyable; unregisters its thread on destruction.
	 */
	class worker {
	public:
		worker(const worker &w) = delete;
		worker(worker &&w) = default;
		~worker();

		worker &operator=(worker &w) = delete;
		worker &operator=(worker &&w) = default;

		template <typename F>
		void critical(F &&f);

	private:
		/* Only ebr::register_worker() may create workers. */
		worker(ebr *e_, reference ref);

		/* This thread's slot in ebr::workers. */
		reference local_epoch;
		/* Owning ebr instance; needed to unregister on destruction. */
		ebr *e;

		friend ebr;
	};

private:
	/* MSB of a local epoch marks "currently inside a critical section". */
	static const size_t ACTIVE_FLAG = static_cast<size_t>(1)
		<< (sizeof(size_t) * 8 - 1);
	/* Epochs rotate modulo 3 (current / staging / reclamation). */
	static const size_t EPOCHS_NUMBER = 3;

	/* Epoch that all workers should eventually observe. */
	atomic global_epoch;

	/* Guards the workers map. */
	std::mutex mtx;
	/* One local-epoch slot per registered thread. */
	std::unordered_map<std::thread::id, atomic> workers;
};
120 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
121 VALGRIND_HG_DISABLE_CHECKING(&global_epoch,
sizeof(global_epoch));
139 std::lock_guard<std::mutex> lock(mtx);
140 auto res = workers.emplace(std::this_thread::get_id(), 0);
142 throw std::runtime_error(
143 "There can be only one worker per thread");
146 return worker{
this, reference{res.first->second}};
164 auto current_epoch = global_epoch.load();
166 std::lock_guard<std::mutex> lock(mtx);
167 for (
auto &w : workers) {
168 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_BEFORE(
169 std::memory_order_seq_cst, &w.second);
170 auto local_e = w.second.load();
171 bool active = local_e & ACTIVE_FLAG;
172 if (active && (local_e != (current_epoch | ACTIVE_FLAG))) {
177 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_BEFORE(std::memory_order_seq_cst,
179 global_epoch.store((current_epoch + 1) % EPOCHS_NUMBER);
194 size_t syncs_cnt = 0;
196 if (
sync() && ++syncs_cnt == EPOCHS_NUMBER) {
213 auto res = global_epoch.load();
214 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
230 auto res = (global_epoch.load() + 1) % EPOCHS_NUMBER;
231 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
236 ebr::worker::worker(
ebr *e_, reference ref) : local_epoch(ref), e(e_)
238 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
239 VALGRIND_HG_DISABLE_CHECKING(&ref.get(),
sizeof(ref.get()));
247 ebr::worker::~worker()
249 std::lock_guard<std::mutex> lock(e->mtx);
250 e->workers.erase(std::this_thread::get_id());
261 template <
typename F>
263 ebr::worker::critical(F &&f)
265 auto new_epoch = e->global_epoch.load() | ACTIVE_FLAG;
266 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
269 local_epoch.get().store(new_epoch);
270 LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
275 local_epoch.get().store(0);
Epoch-based reclamation (EBR).
Definition: ebr.hpp:68
bool sync()
Attempts to synchronise and announce a new epoch.
Definition: ebr.hpp:162
worker register_worker()
Registers and returns a new worker, which can perform critical operations (accessing shared data which may be freed by other threads). There can be only one worker per thread.
Definition: ebr.hpp:137
size_t staging_epoch()
Returns the epoch where objects can be staged for reclamation.
Definition: ebr.hpp:211
ebr()
Default and only ebr constructor.
Definition: ebr.hpp:118
void full_sync()
Perform full synchronisation ensuring that all objects which are no longer globally visible (and potentially staged for reclamation) can be safely freed.
Definition: ebr.hpp:192
size_t gc_epoch()
Returns the epoch available for reclamation, i.e. the one in which objects can be safely freed.
Definition: ebr.hpp:228
Commonly used functionality.
Persistent memory namespace.
Definition: allocation_flag.hpp:15