#ifndef PMEMOBJ_CONCURRENT_HASH_MAP_HPP
#define PMEMOBJ_CONCURRENT_HASH_MAP_HPP

#include <libpmemobj++/detail/pair.hpp>
#include <libpmemobj++/detail/persistent_pool_ptr.hpp>

#include <initializer_list>
#include <type_traits>

template <typename T>
struct hash<pmem::obj::p<T>> {
	size_t
	operator()(const pmem::obj::p<T> &x) const
	{
		return hash<T>()(x.get_ro());
	}
};
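/*
 * Example (added for illustration, not part of the original header): the
 * std::hash specialization above forwards to the hash of the wrapped type,
 * so a pmem::obj::p<T> hashes exactly like a plain T. A minimal sketch:
 *
 *	pmem::obj::p<int> key;          // assume it resides on pmem
 *	key.get_rw() = 42;
 *	std::size_t h = std::hash<pmem::obj::p<int>>{}(key);
 *	assert(h == std::hash<int>{}(42));
 */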
namespace concurrent_hash_map_internal
{

template <typename SharedMutexT>
class shared_mutex_scoped_lock {
	using rw_mutex_type = SharedMutexT;

	shared_mutex_scoped_lock(const shared_mutex_scoped_lock &) = delete;
	shared_mutex_scoped_lock &
	operator=(const shared_mutex_scoped_lock &) = delete;

	shared_mutex_scoped_lock() : mutex(nullptr), is_writer(false)

	shared_mutex_scoped_lock(rw_mutex_type &m, bool write = true)

	~shared_mutex_scoped_lock()

	acquire(rw_mutex_type &m, bool write = true)
		mutex->lock_shared();

		rw_mutex_type *m = mutex;

	try_acquire(rw_mutex_type &m, bool write = true)
		result = write ? m.try_lock() : m.try_lock_shared();

	rw_mutex_type *mutex;
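/*
 * Usage sketch (added, not from the header): the wrapper is a plain RAII
 * guard over the pmem-resident mutex, so a shared or exclusive lock is held
 * for the guard's lifetime. Names below are illustrative only.
 *
 *	pmem::obj::shared_mutex mtx;
 *	{
 *		concurrent_hash_map_internal::shared_mutex_scoped_lock<
 *			pmem::obj::shared_mutex>
 *			guard(mtx, false);      // false = shared (reader) lock
 *		// read the data protected by mtx here
 *	}                                       // unlocked in the destructor
 */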
template <typename ScopedLockType>
using scoped_lock_upgrade_to_writer =
	decltype(std::declval<ScopedLockType>().upgrade_to_writer());

template <typename ScopedLockType>
using scoped_lock_has_upgrade_to_writer =
	detail::supports<ScopedLockType, scoped_lock_upgrade_to_writer>;

template <typename ScopedLockType>
using scoped_lock_downgrade_to_reader =
	decltype(std::declval<ScopedLockType>().downgrade_to_reader());

template <typename ScopedLockType>
using scoped_lock_has_downgrade_to_reader =
	detail::supports<ScopedLockType, scoped_lock_downgrade_to_reader>;

template <typename ScopedLockType,
	  bool = scoped_lock_has_upgrade_to_writer<ScopedLockType>::value &&
		  scoped_lock_has_downgrade_to_reader<ScopedLockType>::value>
class scoped_lock_traits {
	using scope_lock_type = ScopedLockType;

	initial_rw_state(bool write)

	upgrade_to_writer(scope_lock_type &lock)
		return lock.upgrade_to_writer();

	downgrade_to_reader(scope_lock_type &lock)
		return lock.downgrade_to_reader();

template <typename ScopedLockType>
class scoped_lock_traits<ScopedLockType, false> {
	using scope_lock_type = ScopedLockType;

	initial_rw_state(bool write)

	upgrade_to_writer(scope_lock_type &lock)

	downgrade_to_reader(scope_lock_type &lock)
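/*
 * Illustration (added, not from the header): a detector like detail::supports
 * can be built on the usual void_t detection idiom; the real implementation
 * in libpmemobj-cpp may differ, this is only a sketch of the technique used
 * above to choose between the two scoped_lock_traits specializations.
 *
 *	template <typename T, template <typename...> class Op, typename = void>
 *	struct supports_sketch : std::false_type {};
 *
 *	template <typename T, template <typename...> class Op>
 *	struct supports_sketch<T, Op, std::void_t<Op<T>>> : std::true_type {};
 *
 *	// supports_sketch<MyLock, scoped_lock_upgrade_to_writer>::value is
 *	// true only when MyLock::upgrade_to_writer() is well-formed.
 */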
template <typename Key, typename T, typename Hash = std::hash<Key>,
	  typename KeyEqual = std::equal_to<Key>,
	  typename MutexType = pmem::obj::shared_mutex,
	  typename ScopedLockType =
		  concurrent_hash_map_internal::shared_mutex_scoped_lock<MutexType>>
class concurrent_hash_map;
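/*
 * Usage sketch (added, not part of the header): a concurrent_hash_map lives
 * inside a persistent memory pool, typically as a member of the pool's root
 * object. The pool path and layout name below are placeholders.
 *
 *	#include <libpmemobj++/container/concurrent_hash_map.hpp>
 *	#include <libpmemobj++/make_persistent.hpp>
 *	#include <libpmemobj++/pool.hpp>
 *
 *	using map_type =
 *		pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;
 *	struct root {
 *		pmem::obj::persistent_ptr<map_type> map;
 *	};
 *
 *	auto pop = pmem::obj::pool<root>::open("/path/to/pool", "layout");
 *	auto r = pop.root();
 *	if (r->map == nullptr)
 *		pmem::obj::transaction::run(pop, [&] {
 *			r->map = pmem::obj::make_persistent<map_type>();
 *		});
 *	else
 *		r->map->runtime_initialize(); // required after every re-open
 */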
namespace concurrent_hash_map_internal
{

inline void
check_outside_tx()
{
	if (pmemobj_tx_stage() != TX_STAGE_NONE)
		throw pmem::transaction_scope_error(
			"Function called inside transaction scope.");
}

template <typename Hash>
using transparent_key_equal = typename Hash::transparent_key_equal;

template <typename Hash>
using has_transparent_key_equal = detail::supports<Hash, transparent_key_equal>;

template <typename Hash, typename Pred,
	  bool = has_transparent_key_equal<Hash>::value>
struct key_equal_type {
	using type = typename Hash::transparent_key_equal;
};

template <typename Hash, typename Pred>
struct key_equal_type<Hash, Pred, false> {
	using type = Pred;
};

template <typename Mutex, typename ScopedLockType>
void
assert_not_locked(Mutex &mtx)
{
	ScopedLockType scoped_lock;
	assert(scoped_lock.try_acquire(mtx));
	scoped_lock.release();
}
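/*
 * Behavioral note with a sketch (added): the public concurrent_hash_map
 * operations call check_outside_tx() first, so using the container from
 * inside an active transaction is rejected. Illustrative only:
 *
 *	pmem::obj::transaction::run(pop, [&] {
 *		r->map->insert(item);   // throws: called inside a transaction
 *	});
 */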
template <typename Key, typename T, typename MutexType, typename ScopedLockType>
struct hash_map_node {
	using mutex_t = MutexType;
	using scoped_t = ScopedLockType;
	using value_type = detail::pair<const Key, T>;
	using node_ptr_t = detail::persistent_pool_ptr<
		hash_map_node<Key, T, mutex_t, scoped_t>>;

	hash_map_node(const node_ptr_t &_next, const Key &key)
		: next(_next),
		  item(std::piecewise_construct, std::forward_as_tuple(key),
		       std::forward_as_tuple())

	hash_map_node(const node_ptr_t &_next, const Key &key, const T &t)
		: next(_next), item(key, t)

	hash_map_node(const node_ptr_t &_next, value_type &&i)
		: next(_next), item(std::move(i))

	template <typename... Args>
	hash_map_node(const node_ptr_t &_next, Args &&... args)
		: next(_next), item(std::forward<Args>(args)...)

	hash_map_node(const node_ptr_t &_next, const value_type &i)
		: next(_next), item(i)

	hash_map_node(const hash_map_node &) = delete;
	hash_map_node &operator=(const hash_map_node &) = delete;
template <typename Bucket>
class segment_traits {
	using segment_index_t = size_t;
	using size_type = size_t;
	using bucket_type = Bucket;

	constexpr static size_type max_allocation_size = PMEMOBJ_MAX_ALLOC_SIZE;

	constexpr static segment_index_t first_big_block = 27;

	constexpr static size_type big_block_size = size_type(1)
		<< first_big_block;

	static_assert((big_block_size * sizeof(bucket_type)) <
			      max_allocation_size,
		      "Block size exceeds max_allocation_size");

	constexpr static segment_index_t
	first_block_in_segment(segment_index_t seg)
	{
		return seg < first_big_block
			? seg
			: (first_big_block +
			   (segment_index_t(1) << (seg - first_big_block)) - 1);
	}

	constexpr static size_type
	blocks_in_segment(segment_index_t seg)
	{
		return seg < first_big_block
			? segment_index_t(1)
			: segment_index_t(1) << (seg - first_big_block);
	}

	constexpr static size_type
	block_size(segment_index_t b)
	{
		return b < first_big_block ? segment_size(b ? b : 1)
					   : big_block_size;
	}

	constexpr static segment_index_t embedded_segments = 1;

	constexpr static size_type embedded_buckets = 1 << embedded_segments;

	constexpr static segment_index_t number_of_segments = 32;

	static const size_type first_block = 8;

	constexpr static segment_index_t
	number_of_blocks()
	{
		return first_block_in_segment(number_of_segments);
	}

	static segment_index_t
	segment_index_of(size_type index)
	{
		return segment_index_t(detail::Log2(index | 1));
	}

	constexpr static segment_index_t
	segment_base(segment_index_t k)
	{
		return (segment_index_t(1) << k) & ~segment_index_t(1);
	}

	constexpr static size_type
	segment_size(segment_index_t k)
	{
		return size_type(1) << k;
	}

	static_assert(
		embedded_segments < first_big_block,
		"Number of embedded segments cannot exceed max_allocation_size");
};
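/*
 * Worked example (added): with the functions above, segment k holds
 * segment_size(k) = 2^k buckets starting at bucket index segment_base(k) =
 * (2^k) & ~1, and segment_index_of(i) = floor(log2(i | 1)) maps a bucket
 * index back to its segment.
 *
 *	segment_size(3) == 8      // segment 3 holds 8 buckets
 *	segment_base(3) == 8      // ... starting at bucket index 8
 *	segment_base(1) == 2      // segment 1 starts right after the embedded buckets
 *	// bucket 11 = 0b1011 therefore lives in segment 3, which covers 8..15
 */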
template <typename BlockTable, typename SegmentTraits, bool is_const>
class segment_facade_impl : public SegmentTraits {
	using traits_type = SegmentTraits;
	using traits_type::block_size;
	using traits_type::blocks_in_segment;
	using traits_type::embedded_buckets;
	using traits_type::embedded_segments;
	using traits_type::first_block;
	using traits_type::first_block_in_segment;
	using traits_type::segment_base;
	using traits_type::segment_size;

	using table_reference =
		typename std::conditional<is_const, const BlockTable &,

	using table_pointer =
		typename std::conditional<is_const, const BlockTable *,

	using bucket_type = typename traits_type::bucket_type;
	using segment_index_t = typename traits_type::segment_index_t;
	using size_type = typename traits_type::size_type;

	segment_facade_impl(table_reference table, segment_index_t s)
		: my_table(&table), my_seg(s)
	{
		assert(my_seg < traits_type::number_of_segments);
	}

	segment_facade_impl(const segment_facade_impl &src)
		: my_table(src.my_table), my_seg(src.my_seg)

	segment_facade_impl(segment_facade_impl &&src) = default;

	segment_facade_impl &
	operator=(const segment_facade_impl &src)
	{
		my_table = src.my_table;

	segment_facade_impl &
	operator=(segment_facade_impl &&src)
	{
		my_table = src.my_table;

	bucket_type &operator[](size_type i) const
	{
		segment_index_t table_block = first_block_in_segment(my_seg);
		size_type b_size = block_size(table_block);

		table_block += i / b_size;

		return (*my_table)[table_block][static_cast<std::ptrdiff_t>(i)];
	}

	segment_facade_impl &
		segment_facade_impl tmp = *this;

	segment_facade_impl &
		segment_facade_impl tmp = *this;

	segment_facade_impl &

	segment_facade_impl &

		return segment_facade_impl(*(this->my_table),
		return segment_facade_impl(*(this->my_table),

	void
	enable(pool_base &pop)
	{
		assert(my_seg >= embedded_segments);

		if (my_seg < first_block) {
			enable_first_block(pop);
		} else {
			enable_big_segment(pop);
		}
	}

		assert(my_seg >= embedded_segments);

		if (my_seg < first_block) {
			if (my_seg == embedded_segments) {
				size_type sz = segment_size(first_block) -
				delete_persistent<bucket_type[]>(
					(*my_table)[my_seg], sz);
				(*my_table)[my_seg] = nullptr;
			block_range blocks = segment_blocks(my_seg);

			for (segment_index_t b = blocks.first;
			     b < blocks.second; ++b) {
				if ((*my_table)[b] != nullptr) {
					delete_persistent<bucket_type[]>(
						(*my_table)[b], block_size(b));
					(*my_table)[b] = nullptr;

		return segment_size(my_seg ? my_seg : 1);

		block_range blocks = segment_blocks(my_seg);

		for (segment_index_t b = blocks.first; b < blocks.second; ++b) {
			if ((*my_table)[b] == nullptr)

	using block_range = std::pair<segment_index_t, segment_index_t>;

	segment_blocks(segment_index_t seg)
	{
		segment_index_t begin = first_block_in_segment(seg);

		return block_range(begin, begin + blocks_in_segment(seg));
	}

	void
	enable_first_block(pool_base &pop)
	{
		assert(my_seg == embedded_segments);
			segment_size(first_block) - embedded_buckets;
		(*my_table)[my_seg] =
			make_persistent<bucket_type[]>(sz);

		persistent_ptr<bucket_type> base =
			(*my_table)[embedded_segments].raw();

		for (segment_index_t s = my_seg + 1; s < first_block;
			static_cast<std::ptrdiff_t>(
				segment_base(my_seg));

			(*my_table)[s] = (base + off).raw();
	}

	void
	enable_big_segment(pool_base &pop)
	{
		block_range blocks = segment_blocks(my_seg);

		for (segment_index_t b = blocks.first;
		     b < blocks.second; ++b) {
			assert((*my_table)[b] == nullptr);
			(*my_table)[b] = make_persistent<bucket_type[]>(
	}

	table_pointer my_table;
	segment_index_t my_seg;
};
template <typename Key, typename T, typename MutexType, typename ScopedLockType>
class hash_map_base {
	using mutex_t = MutexType;
	using scoped_t = ScopedLockType;

	using size_type = size_t;
	using hashcode_type = size_t;
	using node = hash_map_node<Key, T, mutex_t, scoped_t>;
	using node_ptr_t = detail::persistent_pool_ptr<node>;

	struct bucket {
		using mutex_t = MutexType;
		using scoped_t = ScopedLockType;

		p<std::atomic<uint64_t>> rehashed;
		node_ptr_t node_list;

		bucket() : node_list(nullptr)
		{
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
			VALGRIND_HG_DISABLE_CHECKING(&rehashed,
						     sizeof(rehashed));
#endif
			rehashed.get_rw() = false;
		}

		is_rehashed(std::memory_order order)
			return rehashed.get_ro().load(order);

		set_rehashed(std::memory_order order)
			rehashed.get_rw().store(true, order);

		bucket(const bucket &) = delete;
		bucket &operator=(const bucket &) = delete;
	};

	using segment_traits_t = segment_traits<bucket>;
	using segment_index_t = typename segment_traits_t::segment_index_t;
	static const size_type embedded_buckets =
		segment_traits_t::embedded_buckets;
	static const size_type first_block = segment_traits_t::first_block;
	constexpr static size_type block_table_size =
		segment_traits_t::number_of_blocks();
	using segment_ptr_t = persistent_ptr<bucket[]>;
	using bucket_ptr_t = persistent_ptr<bucket>;
	using blocks_table_t = segment_ptr_t[block_table_size];

	struct tls_data_t {
		p<int64_t> size_diff = 0;
		std::aligned_storage<56, 8> padding;
	};

	using tls_t = detail::enumerable_thread_specific<tls_data_t>;

	enum feature_flags : uint32_t { FEATURE_CONSISTENT_SIZE = 1 };

	struct features {
		p<uint32_t> compat;
		p<uint32_t> incompat;
	};

	/* persistent layout */
	p<uint64_t> my_pool_uuid;
	features layout_features;
	std::aligned_storage<sizeof(size_t), sizeof(size_t)>::type
	std::atomic<hashcode_type> my_mask;
	std::size_t value_size;
	std::aligned_storage<24, 8>::type padding1;
	blocks_table_t my_table;
	std::atomic<size_type> my_size;
	std::aligned_storage<24, 8>::type padding2;
	persistent_ptr<tls_t> tls_ptr;
	p<size_t> on_init_size;
	std::aligned_storage<40, 8>::type reserved;
	segment_enable_mutex_t my_segment_enable_mutex;
	bucket my_embedded_segment[embedded_buckets];

	static constexpr features
	header_features()
	{
		return {FEATURE_CONSISTENT_SIZE, 0};
	}

	const std::atomic<hashcode_type> &
	mask() const noexcept

	std::atomic<hashcode_type> &
	mask() noexcept

	size_type
	size() const
	{
		return my_size.load(std::memory_order_relaxed);
	}

	p<int64_t> &
	thread_size_diff()
	{
		assert(this->tls_ptr != nullptr);
		return this->tls_ptr->local().size_diff;
	}

	void
	tls_restore()
	{
		assert(this->tls_ptr != nullptr);

		pool_base pop = pool_base{pmemobj_pool_by_ptr(this)};

		int64_t last_run_size = 0;
		for (auto &data : *tls_ptr)
			last_run_size += data.size_diff;

		assert(last_run_size >= 0 ||
		       static_cast<int64_t>(static_cast<size_t>(last_run_size) +

		on_init_size += static_cast<size_t>(last_run_size);

		this->my_size = on_init_size;
	}

	using const_segment_facade_t =
		segment_facade_impl<blocks_table_t, segment_traits_t, true>;
	using segment_facade_t =
		segment_facade_impl<blocks_table_t, segment_traits_t, false>;
	hash_map_base()
	{
		static_assert(
			sizeof(size_type) == sizeof(std::atomic<size_type>),
			"std::atomic should have the same layout as underlying integral type");

#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
#endif
		layout_features = {0, 0};

		PMEMoid oid = pmemobj_oid(this);

		assert(!OID_IS_NULL(oid));

		my_pool_uuid = oid.pool_uuid_lo;

		pool_base pop = get_pool_base();

		for (size_type i = 0; i < segment_traits_t::embedded_segments;
		     ++i) {
			pmemobj_oid(my_embedded_segment +
				    segment_traits_t::segment_base(i));
			segment_facade_t seg(my_table, i);
			mark_rehashed<false>(pop, seg);
		}

		this->tls_ptr = nullptr;
	}

	/* thread-local-storage cleanup */
		auto pop = get_pool_base();

		if ((layout_features.compat & FEATURE_CONSISTENT_SIZE) &&
			delete_persistent<tls_t>(tls_ptr);

	void
	calculate_mask()
	{
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
		VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
#endif
#if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED
		VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_size, sizeof(my_size));
		VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_mask, sizeof(my_mask));
#endif
		hashcode_type m = embedded_buckets - 1;

		const_segment_facade_t segment(
			my_table, segment_traits_t::embedded_segments);

		while (segment.is_valid()) {
			m += segment.size();
			++segment;
		}

		mask().store(m, std::memory_order_relaxed);
	}

	template <bool Flush = true>
	void
	mark_rehashed(pool_base &pop, segment_facade_t &segment)
	{
		for (size_type i = 0; i < segment.size(); ++i) {
			bucket *b = &(segment[i]);

			assert_not_locked<mutex_t, scoped_t>(b->mutex);

			b->set_rehashed(std::memory_order_relaxed);
		}

		for (size_type i = 0; i < segment.size(); ++i) {
			bucket *b = &(segment[i]);
			pop.flush(b->rehashed);
		}
	}

	void
	enable_segment(segment_index_t k, bool is_initial = false)
	{
		size_type sz;
		pool_base pop = get_pool_base();

		if (k >= first_block) {
			segment_facade_t new_segment(my_table, k);

			sz = new_segment.size();
			if (!new_segment.is_valid())
				new_segment.enable(pop);

			mark_rehashed(pop, new_segment);
		} else {
			assert(k == segment_traits_t::embedded_segments);

			for (segment_index_t i = k; i < first_block; ++i) {
				segment_facade_t new_segment(my_table, i);

				if (!new_segment.is_valid())
					new_segment.enable(pop);

				mark_rehashed(pop, new_segment);
			}

			sz = segment_traits_t::segment_size(first_block);
		}
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_BEFORE(&my_mask);
#endif
		mask().store(sz - 1, std::memory_order_release);
	}

	bucket *
	get_bucket(hashcode_type h) const
	{
		segment_index_t s = segment_traits_t::segment_index_of(h);

		h -= segment_traits_t::segment_base(s);

		const_segment_facade_t segment(my_table, s);

		assert(segment.is_valid());

		return &(segment[h]);
	}

	bool
	check_mask_race(hashcode_type h, hashcode_type &m) const
	{
		hashcode_type m_now, m_old = m;

		m_now = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif
		return check_rehashing_collision(h, m_old, m = m_now);
	}

	bool
	check_rehashing_collision(hashcode_type h, hashcode_type m_old,
				  hashcode_type m) const
	{
		if ((h & m_old) != (h & m)) {
			for (++m_old; !(h & m_old); m_old <<= 1)
				;
			m_old = (m_old << 1) - 1;

			assert((m_old & (m_old + 1)) == 0 && m_old <= m);

			bucket *b = get_bucket(h & m_old);
			return b->is_rehashed(std::memory_order_acquire);
		}

	template <typename Node, typename... Args>
	void
	insert_new_node_internal(bucket *b,
				 detail::persistent_pool_ptr<Node> &new_node,
				 Args &&... args)
	{
		assert(pmemobj_tx_stage() == TX_STAGE_WORK);

		new_node = pmem::obj::make_persistent<Node>(
			b->node_list, std::forward<Args>(args)...);
		b->node_list = new_node;
	}

	template <typename Node, typename... Args>
	size_type
	insert_new_node(bucket *b, detail::persistent_pool_ptr<Node> &new_node,
			Args &&... args)
	{
		pool_base pop = get_pool_base();

		if (pmemobj_tx_stage() == TX_STAGE_WORK) {
			insert_new_node_internal(b, new_node,
						 std::forward<Args>(args)...);
			this->on_init_size++;
		} else {
			auto &size_diff = thread_size_diff();

			insert_new_node_internal(
				b, new_node,
				std::forward<Args>(args)...);
		}

		return ++(this->my_size);
	}

	check_growth(hashcode_type m, size_type sz)
	{
		segment_index_t new_seg =
			static_cast<segment_index_t>(detail::Log2(

		assert(segment_facade_t(my_table, new_seg - 1)

		std::unique_lock<segment_enable_mutex_t> lock(
			my_segment_enable_mutex, std::try_to_lock);

		if (mask().load(std::memory_order_relaxed) ==
			enable_segment(new_seg);
	}

	void
	reserve(size_type buckets)
	{
		bool is_initial = this->size() == 0;

		for (size_type m = mask(); buckets > m; m = mask())
			enable_segment(
				segment_traits_t::segment_index_of(m + 1),
				is_initial);
	}

	void
	internal_swap(hash_map_base<Key, T, mutex_t, scoped_t> &table)
	{
		pool_base p = get_pool_base();

		this->my_pool_uuid.swap(table.my_pool_uuid);

		this->mask() = table.mask().exchange(
			this->mask(), std::memory_order_relaxed);

		this->my_size = table.my_size.exchange(
			this->my_size, std::memory_order_relaxed);

		std::swap(this->tls_ptr, table.tls_ptr);

		for (size_type i = 0; i < embedded_buckets; ++i)
			this->my_embedded_segment[i].node_list.swap(
				table.my_embedded_segment[i].node_list);

		for (size_type i = segment_traits_t::embedded_segments;
		     i < block_table_size; ++i)
			this->my_table[i].swap(table.my_table[i]);
	}

	pool_base
	get_pool_base()
	{
		PMEMobjpool *pop =
			pmemobj_pool_by_oid(PMEMoid{my_pool_uuid, 0});

		return pool_base(pop);
	}
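/*
 * Worked example (added): a bucket is addressed as get_bucket(hash & mask),
 * and the mask is always of the form 2^k - 1. When the element count
 * outgrows the current mask, check_growth() enables the next segment and
 * publishes the new mask with a release store, doubling the bucket count.
 * For instance, a hash of 101 maps to bucket 101 & 63 = 37 while mask = 63;
 * after growth to mask = 127 the same hash maps to bucket 101, which is why
 * readers re-check the mask (check_mask_race) after locking a bucket.
 */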
template <typename Container, bool is_const>
class hash_map_iterator {
	using iterator_category = std::forward_iterator_tag;
	using difference_type = ptrdiff_t;
	using map_type = Container;
	using value_type = typename map_type::value_type;
	using node = typename map_type::node;
	using bucket = typename map_type::bucket;
	using map_ptr = typename std::conditional<is_const, const map_type *,
						  map_type *>::type;
	using reference =
		typename std::conditional<is_const,
					  typename map_type::const_reference,
					  typename map_type::reference>::type;
	using pointer =
		typename std::conditional<is_const,
					  typename map_type::const_pointer,
					  typename map_type::pointer>::type;

	template <typename C, bool M, bool U>
	friend bool operator==(const hash_map_iterator<C, M> &i,
			       const hash_map_iterator<C, U> &j);

	template <typename C, bool M, bool U>
	friend bool operator!=(const hash_map_iterator<C, M> &i,
			       const hash_map_iterator<C, U> &j);

	friend class hash_map_iterator<map_type, true>;

#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
	template <typename Key, typename T, typename Hash, typename KeyEqual,
		  typename MutexType, typename ScopedLockType>
	friend class ::pmem::obj::concurrent_hash_map;
#endif

	hash_map_iterator(map_ptr map, size_t index)
		: my_map(map), my_index(index), my_bucket(nullptr), my_node(nullptr)
	{
		if (my_index <= my_map->mask()) {
			bucket_accessor acc(my_map, my_index);
			my_bucket = acc.get();
			my_node = static_cast<node *>(
				my_bucket->node_list.get(my_map->my_pool_uuid));

			if (!my_node)
				advance_to_next_bucket();
		}
	}

	hash_map_iterator() = default;

	hash_map_iterator(const hash_map_iterator &other)
		: my_map(other.my_map),
		  my_index(other.my_index),
		  my_bucket(other.my_bucket),
		  my_node(other.my_node)

	template <typename U = void,
		  typename = typename std::enable_if<is_const, U>::type>
	hash_map_iterator(const hash_map_iterator<map_type, false> &other)
		: my_map(other.my_map),
		  my_index(other.my_index),
		  my_bucket(other.my_bucket),
		  my_node(other.my_node)

	hash_map_iterator &operator=(const hash_map_iterator &it) = default;

	reference operator*() const
	{
		return my_node->item;
	}

	pointer operator->() const
	{
		return &operator*();
	}

	hash_map_iterator &
	operator++()
	{
		my_node = static_cast<node *>(
			my_node->next.get((my_map->my_pool_uuid)));

		if (!my_node)
			advance_to_next_bucket();

		return *this;
	}

	hash_map_iterator
	operator++(int)
	{
		hash_map_iterator old(*this);
		operator++();
		return old;
	}

	map_ptr my_map = nullptr;
	size_t my_index = 0;
	bucket *my_bucket = nullptr;
	node *my_node = nullptr;

	class bucket_accessor {
		bucket_accessor(map_ptr m, size_t index)
		{
			my_bucket = m->get_bucket(index);
		}
	};

	void
	advance_to_next_bucket()
	{
		size_t k = my_index + 1;

		while (k <= my_map->mask()) {
			bucket_accessor acc(my_map, k);
			my_bucket = acc.get();

			if (my_bucket->node_list) {
				my_node = static_cast<node *>(
					my_bucket->node_list.get(
						my_map->my_pool_uuid));
			}
		}
	}
};

template <typename Container, bool M, bool U>
bool
operator==(const hash_map_iterator<Container, M> &i,
	   const hash_map_iterator<Container, U> &j)
{
	return i.my_node == j.my_node && i.my_map == j.my_map;
}

template <typename Container, bool M, bool U>
bool
operator!=(const hash_map_iterator<Container, M> &i,
	   const hash_map_iterator<Container, U> &j)
{
	return i.my_node != j.my_node || i.my_map != j.my_map;
}
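/*
 * Usage sketch (added): the iterators above provide forward iteration over
 * all elements. Iteration itself is not safe against concurrent insert or
 * erase, so it is intended for quiescent phases (for example right after
 * runtime_initialize()). Illustrative only:
 *
 *	for (auto it = r->map->begin(); it != r->map->end(); ++it) {
 *		// it->first is the key, it->second the mapped value
 *	}
 */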
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
class concurrent_hash_map
    : protected concurrent_hash_map_internal::hash_map_base<Key, T, MutexType,
							     ScopedLockType> {
	template <typename Container, bool is_const>
	friend class concurrent_hash_map_internal::hash_map_iterator;

	using size_type = typename concurrent_hash_map_internal::hash_map_base<
		Key, T, MutexType, ScopedLockType>::size_type;
	using hashcode_type =
		typename concurrent_hash_map_internal::hash_map_base<
			Key, T, MutexType, ScopedLockType>::hashcode_type;
	using key_type = Key;
	using mapped_type = T;
	using value_type = typename concurrent_hash_map_internal::hash_map_base<
		Key, T, MutexType, ScopedLockType>::node::value_type;
	using difference_type = ptrdiff_t;
	using pointer = value_type *;
	using const_pointer = const value_type *;
	using reference = value_type &;
	using const_reference = const value_type &;
	using iterator = concurrent_hash_map_internal::hash_map_iterator<
		concurrent_hash_map, false>;
	using const_iterator = concurrent_hash_map_internal::hash_map_iterator<
		concurrent_hash_map, true>;
	using hasher = Hash;
	using key_equal = typename concurrent_hash_map_internal::key_equal_type<
		Hash, KeyEqual>::type;

	using mutex_t = MutexType;
	using scoped_t = ScopedLockType;

	using hash_map_base =
		concurrent_hash_map_internal::hash_map_base<Key, T, mutex_t,
							     scoped_t>;
	using hash_map_base::calculate_mask;
	using hash_map_base::check_growth;
	using hash_map_base::check_mask_race;
	using hash_map_base::embedded_buckets;
	using hash_map_base::FEATURE_CONSISTENT_SIZE;
	using hash_map_base::get_bucket;
	using hash_map_base::get_pool_base;
	using hash_map_base::header_features;
	using hash_map_base::insert_new_node;
	using hash_map_base::internal_swap;
	using hash_map_base::layout_features;
	using hash_map_base::mask;
	using hash_map_base::reserve;
	using tls_t = typename hash_map_base::tls_t;
	using node = typename hash_map_base::node;
	using node_mutex_t = typename node::mutex_t;
	using node_ptr_t = typename hash_map_base::node_ptr_t;
	using bucket = typename hash_map_base::bucket;
	using bucket_lock_type = typename bucket::scoped_t;
	using segment_index_t = typename hash_map_base::segment_index_t;
	using segment_traits_t = typename hash_map_base::segment_traits_t;
	using segment_facade_t = typename hash_map_base::segment_facade_t;
	using scoped_lock_traits_type =
		concurrent_hash_map_internal::scoped_lock_traits<scoped_t>;

	using persistent_node_ptr_t = detail::persistent_pool_ptr<node>;
	void
	delete_node(const node_ptr_t &n)
	{
		delete_persistent<node>(
			detail::static_persistent_pool_pointer_cast<node>(n)
				.get_persistent_ptr(this->my_pool_uuid));
	}

	template <typename K>
	persistent_node_ptr_t
	search_bucket(const K &key, bucket *b) const
	{
		assert(b->is_rehashed(std::memory_order_relaxed));

		persistent_node_ptr_t n =
			detail::static_persistent_pool_pointer_cast<node>(
				b->node_list);

		while (n &&
		       !key_equal{}(key,
				    n.get(this->my_pool_uuid)->item.first)) {
			n = detail::static_persistent_pool_pointer_cast<node>(
				n.get(this->my_pool_uuid)->next);
		}

		return n;
	}

	class bucket_accessor : public bucket_lock_type {
		bucket_accessor(bucket_accessor &&b) noexcept
		{
			bucket_lock_type::mutex = b.bucket_lock_type::mutex;
			bucket_lock_type::is_writer =
				b.bucket_lock_type::is_writer;
			b.bucket_lock_type::mutex = nullptr;
			b.bucket_lock_type::is_writer = false;
		}

		bucket_accessor(concurrent_hash_map *base,
				const hashcode_type h, bool writer = false)

		void
		acquire(concurrent_hash_map *base, const hashcode_type h,
			bool writer = false)
		{
			my_b = base->get_bucket(h);

			if (my_b->is_rehashed(std::memory_order_acquire) ==
				    false &&
			    bucket_lock_type::try_acquire(this->my_b->mutex,
							  /*write=*/true)) {
				if (my_b->is_rehashed(
					    std::memory_order_relaxed) ==
				    false) {
					base->rehash_bucket<false>(my_b, h);
				}
			} else {
				bucket_lock_type::acquire(my_b->mutex, writer);
			}

			assert(my_b->is_rehashed(std::memory_order_relaxed));
		}

		bool
		is_writer() const
		{
			return bucket_lock_type::is_writer;
		}
	};

	class serial_bucket_accessor {
		serial_bucket_accessor(concurrent_hash_map *base,
				       const hashcode_type h,
				       bool writer = false)
		{
			acquire(base, h, writer);
		}

		void
		acquire(concurrent_hash_map *base, const hashcode_type h,
			bool writer = false)
		{
			my_b = base->get_bucket(h);

			if (my_b->is_rehashed(std::memory_order_relaxed) ==
			    false) {
				base->rehash_bucket<true>(my_b, h);
			}

			assert(my_b->is_rehashed(std::memory_order_relaxed));
		}
	};

	hashcode_type
	get_hash_code(node_ptr_t &n)
	{
		return hasher{}(
			detail::static_persistent_pool_pointer_cast<node>(n)(
				my_pool_uuid)
				->item.first);
	}

	template <bool serial>
	void
	rehash_bucket(bucket *b_new, const hashcode_type h)
	{
		using accessor_type = typename std::conditional<
			serial, serial_bucket_accessor, bucket_accessor>::type;
		using scoped_lock_traits_type =
			concurrent_hash_map_internal::scoped_lock_traits<
				typename accessor_type::scoped_t>;

		pool_base pop = get_pool_base();
		node_ptr_t *p_new = &(b_new->node_list);

		if (*p_new != nullptr) {
			assert(!b_new->is_rehashed(std::memory_order_relaxed));

			b_new->set_rehashed(std::memory_order_relaxed);
			pop.persist(b_new->rehashed);
		}

		hashcode_type mask = (1u << detail::Log2(h)) - 1;
		assert((h & mask) < h);
		accessor_type b_old(
			this, h & mask,
			scoped_lock_traits_type::initial_rw_state(true));

		mask = (mask << 1) | 1;
		assert((mask & (mask + 1)) == 0 && (h & mask) == h);

		for (node_ptr_t *p_old = &(b_old->node_list), n = *p_old; n;
		     n = *p_old) {
			hashcode_type c = get_hash_code(n);

			hashcode_type bmask = h & (mask >> 1);
			bmask = bmask == 0
				? 1
				: (1u << (detail::Log2(bmask) + 1)) - 1;

			assert((c & bmask) == (h & bmask));

			if ((c & mask) == h) {
				if (!b_old.is_writer() &&
				    !scoped_lock_traits_type::
					    upgrade_to_writer(b_old)) {

				*p_old = n(this->my_pool_uuid)->next;

				p_new = &(n(this->my_pool_uuid)->next);
			} else {
				p_old = &(n(this->my_pool_uuid)->next);
			}
		}

		b_new->set_rehashed(std::memory_order_release);
		pop.persist(b_new->rehashed);
	}

	void
	check_incompat_features()
	{
		if (layout_features.incompat != header_features().incompat)
			throw pmem::layout_error(
				"Incompat flags mismatch, for more details go to: https://pmem.io/libpmemobj-cpp\n");

		if ((layout_features.compat & FEATURE_CONSISTENT_SIZE) &&
		    this->value_size != sizeof(value_type))
			throw pmem::layout_error(
				"Size of value_type is different than the one stored in the pool\n");
	}

	class const_accessor : protected node::scoped_t {
		using node::scoped_t::try_acquire;

		using value_type =
			const typename concurrent_hash_map::value_type;

		void
		release()
		{
			concurrent_hash_map_internal::check_outside_tx();
			node::scoped_t::release();
		}

		const_reference operator*() const
		{
			return my_node->item;
		}

		const_accessor()
		{
			concurrent_hash_map_internal::check_outside_tx();
		}

		hashcode_type my_hash;
	};

	class accessor : public const_accessor {
		reference operator*() const
		{
			assert(this->my_node);

			return this->my_node->item;
		}
	};
	concurrent_hash_map(const concurrent_hash_map &table)
	{
		reserve(table.size());
	}

	template <typename I>
	concurrent_hash_map(I first, I last)
	{
		reserve(static_cast<size_type>(std::distance(first, last)));
	}

	void
	runtime_initialize()
	{
		check_incompat_features();

		if (!(layout_features.compat & FEATURE_CONSISTENT_SIZE)) {
			auto actual_size =
				std::distance(this->begin(), this->end());
			assert(actual_size >= 0);

			this->my_size = static_cast<size_t>(actual_size);

			auto pop = get_pool_base();
			transaction::run(pop, [&] {
				this->tls_ptr = make_persistent<tls_t>();
				this->on_init_size =
					static_cast<size_t>(actual_size);
				this->value_size = sizeof(value_type);

				layout_features.compat |=
					FEATURE_CONSISTENT_SIZE;
			});
		} else {
			assert(this->tls_ptr != nullptr);
			this->tls_restore();
		}

		assert(this->size() ==
		       size_type(std::distance(this->begin(), this->end())));
	}
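/*
 * Recovery sketch (added): runtime_initialize() must be called once after
 * every pool open, before any other operation on the map. As the code above
 * shows, when the consistent-size feature is not yet recorded in the layout,
 * the element count is recomputed by walking the container. Illustrative
 * only:
 *
 *	auto pop = pmem::obj::pool<root>::open("/path/to/pool", "layout");
 *	pop.root()->map->runtime_initialize();
 *	// the map is now safe to use concurrently
 */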
	[[deprecated(
		"runtime_initialize(bool) is now deprecated, use runtime_initialize(void)")]] void
	runtime_initialize(bool graceful_shutdown)
	{
		check_incompat_features();

		if (!graceful_shutdown) {
			auto actual_size =
				std::distance(this->begin(), this->end());
			assert(actual_size >= 0);
			this->my_size = static_cast<size_type>(actual_size);
		}

		assert(this->size() ==
		       size_type(std::distance(this->begin(), this->end())));
	}

	concurrent_hash_map &
	operator=(const concurrent_hash_map &table)
	{
		if (this != &table) {

	void rehash(size_type n = 0);

	void
	free_data()
	{
		auto pop = get_pool_base();

	iterator
	begin()
	{
		return iterator(this, 0);
	}

	iterator
	end()
	{
		return iterator(this, mask() + 1);
	}

	const_iterator
	begin() const
	{
		return const_iterator(this, 0);
	}

	const_iterator
	end() const
	{
		return const_iterator(this, mask() + 1);
	}

	size_type
	size() const
	{
		return hash_map_base::size();
	}

	bool
	empty() const
	{
		return this->size() == 0;
	}

	size_type
	max_size() const
	{
		return (~size_type(0)) / sizeof(node);
	}
	/* count(const Key &key) const */
		concurrent_hash_map_internal::check_outside_tx();
			key, nullptr, false);

	/* count(const K &key) const - transparent-key-equal overload */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
		concurrent_hash_map_internal::check_outside_tx();
			key, nullptr, false);

	/* find(const_accessor &result, const Key &key) const */
		concurrent_hash_map_internal::check_outside_tx();
			key, &result, false);

	/* find(const_accessor &result, const K &key) const */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
		concurrent_hash_map_internal::check_outside_tx();
			key, &result, false);

	/* find(accessor &result, const Key &key) */
		concurrent_hash_map_internal::check_outside_tx();

		return internal_find(key, &result, true);

	/* find(accessor &result, const K &key) */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
		concurrent_hash_map_internal::check_outside_tx();

		return internal_find(key, &result, true);

	/* insert(const_accessor &result, const Key &key) */
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(key, &result, false, key);

	/* insert(accessor &result, const Key &key) */
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(key, &result, true, key);

	/* insert(const_accessor &result, const value_type &value) */
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(value.first, &result, false, value);

	/* insert(accessor &result, const value_type &value) */
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(value.first, &result, true, value);

	/* insert(const value_type &value) */
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(value.first, nullptr, false, value);

	/* insert(const_accessor &result, value_type &&value) */
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(value.first, &result, false,
				       std::move(value));

	/* insert(accessor &result, value_type &&value) */
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(value.first, &result, true,
				       std::move(value));

	/* insert(value_type &&value) */
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(value.first, nullptr, false,
				       std::move(value));

	/* insert(I first, I last) */
	template <typename I>
		concurrent_hash_map_internal::check_outside_tx();

		for (; first != last; ++first)
			insert(*first);

	/* insert(std::initializer_list<value_type> il) */
		concurrent_hash_map_internal::check_outside_tx();

		insert(il.begin(), il.end());
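/*
 * Usage sketch (added): insert() and find() come in accessor variants; a
 * const_accessor holds a read lock on the found element and an accessor
 * holds a write lock, both released when the accessor is released or goes
 * out of scope. Illustrative only:
 *
 *	map_type::const_accessor racc;
 *	if (r->map->find(racc, key)) {
 *		// read lock on the element is held until racc is released
 *	}
 *
 *	map_type::accessor wacc;
 *	if (r->map->insert(wacc, key)) {
 *		// new element was inserted; write lock is held via wacc
 *	}
 */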
	/* insert_or_assign(const key_type &key, M &&obj) */
	template <typename M>
		concurrent_hash_map_internal::check_outside_tx();

		accessor acc;
		auto result = internal_insert(key, &acc, true, key,
					      std::forward<M>(obj));

			acc->second = std::forward<M>(obj);

	/* insert_or_assign(key_type &&key, M &&obj) */
	template <typename M>
		concurrent_hash_map_internal::check_outside_tx();

		accessor acc;
		auto result = internal_insert(key, &acc, true, std::move(key),
					      std::forward<M>(obj));

			acc->second = std::forward<M>(obj);

	/* insert_or_assign(K &&key, M &&obj) - transparent-key-equal overload */
	template <typename K, typename M,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::has_transparent_key_equal<
				  hasher>::value &&
				  std::is_constructible<key_type, K>::value,
			  K>::type>
		concurrent_hash_map_internal::check_outside_tx();

		accessor acc;
		auto result =
			internal_insert(key, &acc, true, std::forward<K>(key),
					std::forward<M>(obj));

			acc->second = std::forward<M>(obj);
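/*
 * Usage sketch (added): insert_or_assign() inserts the pair when the key is
 * absent and overwrites the mapped value otherwise, returning true only for
 * an actual insertion. Illustrative only:
 *
 *	bool inserted = r->map->insert_or_assign(key, new_value);
 *	// inserted == false means an existing value was replaced
 */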
	/* erase(const Key &key) */
		concurrent_hash_map_internal::check_outside_tx();

		return internal_erase(key);

	pobj_defrag_result
	defragment(double start_percent = 0, double amount_percent = 100)
	{
		double end_percent = start_percent + amount_percent;
		if (start_percent < 0 || start_percent >= 100 ||
		    end_percent < 0 || end_percent > 100 ||
		    start_percent >= end_percent) {
			throw std::range_error("incorrect range");
		}

		size_t max_index = mask().load(std::memory_order_acquire);
		size_t start_index =
			static_cast<size_t>((start_percent * max_index) / 100);
		size_t end_index =
			static_cast<size_t>((end_percent * max_index) / 100);

		end_index = (std::min)(end_index, max_index);

#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif
		for (size_t i = end_index + 1; i >= start_index + 1; i--) {

		return my_defrag.run();
	}
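/*
 * Usage sketch (added): defragment() can be run on the whole table or on a
 * percentage range of its buckets, e.g. in several smaller steps. It may
 * throw (for example std::range_error for a bad range, or a pmem defrag
 * error from the underlying engine), so callers typically guard it.
 * Illustrative only:
 *
 *	try {
 *		r->map->defragment();        // whole table
 *		r->map->defragment(0, 25);   // or just the first quarter
 *	} catch (std::exception &e) {
 *		// handle or log the failure
 *	}
 */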
	/* erase(const K &key) - transparent-key-equal overload */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
		concurrent_hash_map_internal::check_outside_tx();

		return internal_erase(key);

	bool try_acquire_item(const_accessor *result, node_mutex_t &mutex,
			      bool write);

	/* vector of bucket locks, unlocked at destruction time */
		using mutex_t = MutexType;

		/* push_and_try_lock(concurrent_hash_map *base, hashcode_type h) */
			vec.emplace_back(base, h, true /*writer*/);
			bucket *b = vec.back().get();

			auto node_ptr = static_cast<node *>(
				b->node_list.get(base->my_pool_uuid));

				if (!base->try_acquire_item(&ca,

					static_cast<node *>(node_ptr->next.get(
						(base->my_pool_uuid)));

		std::vector<bucket_accessor> vec;

	template <typename K>
	bool internal_find(const K &key, const_accessor *result, bool write);

	template <typename K, typename... Args>
	bool internal_insert(const K &key, const_accessor *result, bool write,
			     Args &&... args);

	template <bool Bucket_rw_lock, typename K>
	persistent_node_ptr_t
	get_node(const K &key, bucket_accessor &b)
	{
		auto n = search_bucket(key, b.get());

		if (Bucket_rw_lock && !b.is_writer() &&
		    !scoped_lock_traits_type::upgrade_to_writer(b)) {
			n = search_bucket(key, b.get());
				scoped_lock_traits_type::
					downgrade_to_reader(b);
		}

		return n;
	}

	template <typename K>
	bool internal_erase(const K &key);

	void clear_segment(segment_index_t s);

	template <typename I>
	void internal_copy(I first, I last);

	/* defrag_save_nodes(bucket *b, pmem::obj::defrag &defrag) */
		auto node_ptr = static_cast<node *>(
			b->node_list.get(this->my_pool_uuid));

			node_ptr = static_cast<node *>(
				node_ptr->next.get((this->my_pool_uuid)));
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::try_acquire_item(const_accessor *result,
						      node_mutex_t &mutex,
						      bool write)
{
	if (!result->try_acquire(mutex, write)) {
		for (detail::atomic_backoff backoff(true);;) {
			if (result->try_acquire(mutex, write))
				break;

			if (!backoff.bounded_pause())
				return false;
		}
	}

	return true;
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_find(const K &key,
						   const_accessor *result,
						   bool write)
{
	assert(!result || !result->my_node);

	hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif
	assert((m & (m + 1)) == 0);

	hashcode_type const h = hasher{}(key);

	persistent_node_ptr_t node;

		bucket_accessor b(
			this, h & m,
			scoped_lock_traits_type::initial_rw_state(false));
		node = get_node<false>(key, b);

		if (check_mask_race(h, m)) {

		if (!try_acquire_item(
			    result, node.get(this->my_pool_uuid)->mutex, write))

		std::this_thread::yield();

		m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

	result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
	result->my_hash = h;
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K, typename... Args>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_insert(const K &key,
						     const_accessor *result,
						     bool write, Args &&... args)
{
	assert(!result || !result->my_node);

	hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif
	assert((m & (m + 1)) == 0);

	hashcode_type const h = hasher{}(key);

	persistent_node_ptr_t node;
	size_t new_size = 0;
	bool inserted = false;

		bucket_accessor b(
			this, h & m,
			scoped_lock_traits_type::initial_rw_state(true));
		node = get_node<true>(key, b);

		if (check_mask_race(h, m)) {

		new_size = insert_new_node(b.get(), node,
					   std::forward<Args>(args)...);

		if (!try_acquire_item(
			    result, node.get(this->my_pool_uuid)->mutex, write))

		std::this_thread::yield();

		m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

	result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
	result->my_hash = h;

	check_growth(m, new_size);
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_erase(const K &key)
{
	hashcode_type const h = hasher{}(key);
	hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif
	pool_base pop = get_pool_base();

		bucket_accessor b(
			this, h & m,
			scoped_lock_traits_type::initial_rw_state(true));

		node_ptr_t *p = &b->node_list;

			detail::static_persistent_pool_pointer_cast<node>(
				n)(this->my_pool_uuid)
			p = &n(this->my_pool_uuid)->next;

			if (check_mask_race(h, m))

		} else if (!b.is_writer() &&
			   !scoped_lock_traits_type::upgrade_to_writer(b)) {
			if (check_mask_race(h, m))

		persistent_ptr<node> del = n(this->my_pool_uuid);

		if (!try_acquire_item(&acc, del->mutex, true)) {

			std::this_thread::yield();

			m = mask().load(std::memory_order_acquire);

	assert(pmemobj_tx_stage() == TX_STAGE_NONE);

	auto &size_diff = this->thread_size_diff();
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::swap(concurrent_hash_map &table)
{
	internal_swap(table);
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::rehash(size_type sz)
{
	concurrent_hash_map_internal::check_outside_tx();

	hashcode_type m = mask();

	hashcode_type b = (m + 1) >> 1;

	assert((b & (b - 1)) == 0);

	for (; b <= m; ++b) {
		bucket *bp = get_bucket(b);

		concurrent_hash_map_internal::assert_not_locked<mutex_t,
								scoped_t>(
			bp->mutex);

		if (bp->is_rehashed(std::memory_order_relaxed) == false)
			rehash_bucket<true>(bp, b);
	}
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::clear()
{
	hashcode_type m = mask();

	assert((m & (m + 1)) == 0);

	for (segment_index_t b = 0; b <= m; ++b) {
		bucket *bp = get_bucket(b);
		concurrent_hash_map_internal::assert_not_locked<mutex_t,
								scoped_t>(
			bp->mutex);
	}

		assert(this->tls_ptr != nullptr);
		this->tls_ptr->clear();

		this->on_init_size = 0;

	segment_index_t s = segment_traits_t::segment_index_of(m);

	assert(s + 1 == this->block_table_size ||
	       !segment_facade_t(this->my_table, s + 1).is_valid());

	mask().store(embedded_buckets - 1, std::memory_order_relaxed);
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::clear_segment(segment_index_t s)
{
	segment_facade_t segment(this->my_table, s);

	assert(segment.is_valid());

	size_type sz = segment.size();
	for (segment_index_t i = 0; i < sz; ++i) {
		for (node_ptr_t n = segment[i].node_list; n;
		     n = segment[i].node_list) {
			segment[i].node_list = n(this->my_pool_uuid)->next;
		}
	}

	if (s >= segment_traits_t::embedded_segments)
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_copy(const concurrent_hash_map &source)
{
	auto pop = get_pool_base();

	reserve(source.size());
	internal_copy(source.begin(), source.end());
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename I>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_copy(I first, I last)
{
	hashcode_type m = mask();

	for (; first != last; ++first) {
		hashcode_type h = hasher{}(first->first);
		bucket *b = get_bucket(h & m);

		assert(b->is_rehashed(std::memory_order_relaxed));

		detail::persistent_pool_ptr<node> p;
		insert_new_node(b, p, *first);
	}
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline bool
operator==(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				      ScopedLockType> &a,
	   const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType> &b)
{
	if (a.size() != b.size())
		return false;

	typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType>::const_iterator
		i(a.begin()),
		i_end(a.end());

	typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType>::const_iterator j,
		j_end(b.end());

	for (; i != i_end; ++i) {
		j = b.equal_range(i->first).first;

		if (j == j_end || !(i->second == j->second))
			return false;
	}

	return true;
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline bool
operator!=(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				      ScopedLockType> &a,
	   const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType> &b)
{
	return !(a == b);
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline void
swap(concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &a,
     concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &b)
{
	a.swap(b);
}