#ifndef PMEMOBJ_CONCURRENT_HASH_MAP_HPP
#define PMEMOBJ_CONCURRENT_HASH_MAP_HPP

#include <libpmemobj++/detail/pair.hpp>
#include <libpmemobj++/detail/persistent_pool_ptr.hpp>

#include <initializer_list>
#include <type_traits>
namespace std
{
/** Specialization of std::hash for p<T>. */
template <typename T>
struct hash<pmem::obj::p<T>> {
	size_t
	operator()(const pmem::obj::p<T> &x) const
	{
		return hash<T>()(x.get_ro());
	}
};
} /* namespace std */
namespace pmem
{
namespace obj
{

namespace concurrent_hash_map_internal
{
template <typename SharedMutexT>
class shared_mutex_scoped_lock {
	using rw_mutex_type = SharedMutexT;

public:
	shared_mutex_scoped_lock(const shared_mutex_scoped_lock &) = delete;
	shared_mutex_scoped_lock &
	operator=(const shared_mutex_scoped_lock &) = delete;

	/** Default constructor. Construct lock that has not acquired a
	 * mutex. */
	shared_mutex_scoped_lock() : mutex(nullptr), is_writer(false)
	{
	}

	/** Acquire lock on given mutex. */
	shared_mutex_scoped_lock(rw_mutex_type &m, bool write = true)
	    : mutex(nullptr)
	{
		acquire(m, write);
	}

	/** Release lock (if lock is held). */
	~shared_mutex_scoped_lock()
	{
		if (mutex)
			release();
	}

	/** Acquire lock on given mutex. */
	void
	acquire(rw_mutex_type &m, bool write = true)
	{
		is_writer = write;
		mutex = &m;
		if (write)
			mutex->lock();
		else
			mutex->lock_shared();
	}

	/** Release lock. */
	void
	release()
	{
		assert(mutex);
		rw_mutex_type *m = mutex;
		mutex = nullptr;
		if (is_writer)
			m->unlock();
		else
			m->unlock_shared();
	}

	/**
	 * Try to acquire lock on given mutex.
	 * @returns true if lock was acquired, false otherwise.
	 */
	bool
	try_acquire(rw_mutex_type &m, bool write = true)
	{
		assert(!mutex);
		bool result;
		is_writer = write;
		result = write ? m.try_lock() : m.try_lock_shared();
		if (result)
			mutex = &m;
		return result;
	}

protected:
	/** Pointer to the held mutex, nullptr if lock is not held. */
	rw_mutex_type *mutex;
	/** Whether the lock was taken for writing. */
	bool is_writer;
}; /* class shared_mutex_scoped_lock */
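/*
 * Illustrative sketch (not part of this header): shared_mutex_scoped_lock
 * implements the scoped-lock protocol over any mutex type exposing
 * lock()/lock_shared()/try_lock()/try_lock_shared()/unlock()/unlock_shared(),
 * e.g. pmem::obj::shared_mutex:
 *
 *	pmem::obj::shared_mutex mtx;
 *	{
 *		shared_mutex_scoped_lock<pmem::obj::shared_mutex> lock(
 *			mtx, false); // shared (read) lock
 *	} // released by the destructor
 *	{
 *		shared_mutex_scoped_lock<pmem::obj::shared_mutex> lock;
 *		if (lock.try_acquire(mtx, true)) { // exclusive (write) lock
 *			// ... modify shared state ...
 *			lock.release();
 *		}
 *	}
 */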
/* Detect whether a scoped-lock type supports lock upgrade/downgrade. */
template <typename ScopedLockType>
using scoped_lock_upgrade_to_writer =
	decltype(std::declval<ScopedLockType>().upgrade_to_writer());

template <typename ScopedLockType>
using scoped_lock_has_upgrade_to_writer =
	detail::supports<ScopedLockType, scoped_lock_upgrade_to_writer>;

template <typename ScopedLockType>
using scoped_lock_downgrade_to_reader =
	decltype(std::declval<ScopedLockType>().downgrade_to_reader());

template <typename ScopedLockType>
using scoped_lock_has_downgrade_to_reader =
	detail::supports<ScopedLockType, scoped_lock_downgrade_to_reader>;
template <typename ScopedLockType,
	  bool = scoped_lock_has_upgrade_to_writer<ScopedLockType>::value &&
		  scoped_lock_has_downgrade_to_reader<ScopedLockType>::value>
class scoped_lock_traits {
public:
	using scope_lock_type = ScopedLockType;

	static bool
	initial_rw_state(bool write)
	{
		/* For upgradeable locks, the initial state is always read. */
		return false;
	}

	static bool
	upgrade_to_writer(scope_lock_type &lock)
	{
		return lock.upgrade_to_writer();
	}

	static bool
	downgrade_to_reader(scope_lock_type &lock)
	{
		return lock.downgrade_to_reader();
	}
};
template <typename ScopedLockType>
class scoped_lock_traits<ScopedLockType, false> {
public:
	using scope_lock_type = ScopedLockType;

	static bool
	initial_rw_state(bool write)
	{
		/* For non-upgradeable locks, take the lock in the required
		 * state from the very beginning. */
		return write;
	}

	static bool
	upgrade_to_writer(scope_lock_type &lock)
	{
		/* This lock type does not support upgrade; the lock was
		 * already taken in its final state. */
		return true;
	}

	static bool
	downgrade_to_reader(scope_lock_type &lock)
	{
		/* This lock type does not support downgrade. */
		return false;
	}
};
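/*
 * Illustrative note: scoped_lock_traits dispatches on whether ScopedLockType
 * provides upgrade_to_writer()/downgrade_to_reader() (detected via
 * detail::supports above). With an upgradeable lock, read paths start with a
 * reader lock and upgrade only on demand; with a plain lock, the false
 * specialization makes initial_rw_state(write) return write, so the lock is
 * taken in its final state up front. For example, the following holds for
 * the scoped lock defined in this file:
 *
 *	static_assert(
 *		!scoped_lock_has_upgrade_to_writer<shared_mutex_scoped_lock<
 *			pmem::obj::shared_mutex>>::value,
 *		"shared_mutex_scoped_lock does not support lock upgrade");
 */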
} /* namespace concurrent_hash_map_internal */

/**
 * Persistent memory aware implementation of Intel TBB concurrent_hash_map.
 */
template <typename Key, typename T, typename Hash = std::hash<Key>,
	  typename KeyEqual = std::equal_to<Key>,
	  typename MutexType = pmem::obj::shared_mutex,
	  typename ScopedLockType = concurrent_hash_map_internal::
		  shared_mutex_scoped_lock<MutexType>>
class concurrent_hash_map;
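/*
 * Illustrative usage sketch (identifiers below are examples, not part of
 * this header). The map lives in a persistent memory pool and must be
 * reinitialized after every application restart:
 *
 *	using map_t = pmem::obj::concurrent_hash_map<p<int>, p<int>>;
 *	struct root {
 *		persistent_ptr<map_t> map;
 *	};
 *
 *	auto pop = pool<root>::open("/path/to/poolfile", "layout");
 *	auto r = pop.root();
 *
 *	if (r->map == nullptr)
 *		transaction::run(pop, [&] {
 *			r->map = make_persistent<map_t>();
 *		});
 *
 *	r->map->runtime_initialize(); // required after every restart
 *
 *	map_t::accessor acc;
 *	r->map->insert(acc, 42); // write-locks the (possibly new) element
 *	acc->second = 1;
 *	acc.release();
 */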
namespace concurrent_hash_map_internal
{
/* Throw if called inside a pmemobj transaction. */
inline void
check_outside_tx()
{
	if (pmemobj_tx_stage() != TX_STAGE_NONE)
		throw pmem::transaction_scope_error(
			"Function called inside transaction scope.");
}
template <typename Hash>
using transparent_key_equal = typename Hash::transparent_key_equal;

template <typename Hash>
using has_transparent_key_equal = detail::supports<Hash, transparent_key_equal>;

template <typename Hash, typename Pred,
	  bool = has_transparent_key_equal<Hash>::value>
struct key_equal_type {
	using type = typename Hash::transparent_key_equal;
};

template <typename Hash, typename Pred>
struct key_equal_type<Hash, Pred, false> {
	using type = Pred;
};
template <typename Mutex, typename ScopedLockType>
void
assert_not_locked(Mutex &mtx)
{
#ifndef NDEBUG
	ScopedLockType scoped_lock;
	assert(scoped_lock.try_acquire(mtx));
	scoped_lock.release();
#else
	(void) mtx;
#endif
}
template <typename Key, typename T, typename MutexType,
	  typename ScopedLockType>
struct hash_map_node {
	/** Mutex type. */
	using mutex_t = MutexType;

	/** Scoped lock type for mutex. */
	using scoped_t = ScopedLockType;

	using value_type = detail::pair<const Key, T>;

	/** Persistent pointer type for next. */
	using node_ptr_t = detail::persistent_pool_ptr<
		hash_map_node<Key, T, mutex_t, scoped_t>>;

	/** Next node in chain. */
	node_ptr_t next;

	/** Node mutex. */
	mutex_t mutex;

	/** Item stored in node. */
	value_type item;

	hash_map_node(const node_ptr_t &_next, const Key &key)
	    : next(_next),
	      item(std::piecewise_construct, std::forward_as_tuple(key),
		   std::forward_as_tuple())
	{
	}

	hash_map_node(const node_ptr_t &_next, const Key &key, const T &t)
	    : next(_next), item(key, t)
	{
	}

	hash_map_node(const node_ptr_t &_next, value_type &&i)
	    : next(_next), item(std::move(i))
	{
	}

	template <typename... Args>
	hash_map_node(const node_ptr_t &_next, Args &&... args)
	    : next(_next), item(std::forward<Args>(args)...)
	{
	}

	hash_map_node(const node_ptr_t &_next, const value_type &i)
	    : next(_next), item(i)
	{
	}

	/** Copy constructor is deleted. */
	hash_map_node(const hash_map_node &) = delete;

	/** Assignment operator is deleted. */
	hash_map_node &operator=(const hash_map_node &) = delete;
}; /* struct hash_map_node */
/**
 * Provides the way to access certain properties of segments used by the
 * hash map.
 */
template <typename Bucket>
class segment_traits {
public:
	/** Segment index type. */
	using segment_index_t = size_t;
	using size_type = size_t;
	using bucket_type = Bucket;

protected:
	/** PMDK has limitation of allocation size. */
	constexpr static size_type max_allocation_size =
		PMEMOBJ_MAX_ALLOC_SIZE;

	/** First big block that has fixed size. */
	constexpr static segment_index_t first_big_block = 27;

	/** Max number of buckets per segment. */
	constexpr static size_type big_block_size = size_type(1)
		<< first_big_block;

	/* Block size in bytes cannot exceed max_allocation_size. */
	static_assert((big_block_size * sizeof(bucket_type)) <
			      max_allocation_size,
		      "Block size exceeds max_allocation_size");

	/** @returns index of the first block in the @arg seg. */
	constexpr static segment_index_t
	first_block_in_segment(segment_index_t seg)
	{
		return seg < first_big_block
			? seg
			: (first_big_block +
			   (segment_index_t(1) << (seg - first_big_block)) -
			   1);
	}

	/** @returns number of blocks in the @arg seg. */
	constexpr static size_type
	blocks_in_segment(segment_index_t seg)
	{
		return seg < first_big_block
			? segment_index_t(1)
			: segment_index_t(1) << (seg - first_big_block);
	}

	/** @returns number of buckets in the @arg b. */
	constexpr static size_type
	block_size(segment_index_t b)
	{
		return b < first_big_block ? segment_size(b ? b : 1)
					   : big_block_size;
	}

public:
	/** Number of embedded segments. */
	constexpr static segment_index_t embedded_segments = 1;

	/** Count of buckets in the embedded segments. */
	constexpr static size_type embedded_buckets = 1 << embedded_segments;

	/** Maximum number of segments. */
	constexpr static segment_index_t number_of_segments = 32;

	/** Count of segments in the first block. */
	static const size_type first_block = 8;

	/** @returns maximum number of blocks. */
	constexpr static segment_index_t
	number_of_blocks()
	{
		return first_block_in_segment(number_of_segments);
	}

	/** @returns segment index of given index in the array. */
	static segment_index_t
	segment_index_of(size_type index)
	{
		return segment_index_t(detail::Log2(index | 1));
	}

	/** @returns the first array index of given segment. */
	constexpr static segment_index_t
	segment_base(segment_index_t k)
	{
		return (segment_index_t(1) << k) & ~segment_index_t(1);
	}

	/** @returns segment size except for @arg k == 0. */
	constexpr static size_type
	segment_size(segment_index_t k)
	{
		return size_type(1) << k;
	}
	static_assert(
		embedded_segments < first_big_block,
		"Number of embedded segments cannot exceed max_allocation_size");
}; /* class segment_traits */
/**
 * Implements logic to access certain blocks in the segments table.
 */
template <typename BlockTable, typename SegmentTraits, bool is_const>
class segment_facade_impl : public SegmentTraits {
private:
	using traits_type = SegmentTraits;
	using traits_type::block_size;
	using traits_type::blocks_in_segment;
	using traits_type::embedded_buckets;
	using traits_type::embedded_segments;
	using traits_type::first_block;
	using traits_type::first_block_in_segment;
	using traits_type::segment_base;
	using traits_type::segment_size;

public:
	using table_reference =
		typename std::conditional<is_const, const BlockTable &,
					  BlockTable &>::type;

	using table_pointer =
		typename std::conditional<is_const, const BlockTable *,
					  BlockTable *>::type;

	using bucket_type = typename traits_type::bucket_type;
	using segment_index_t = typename traits_type::segment_index_t;
	using size_type = typename traits_type::size_type;
	/** Constructor. */
	segment_facade_impl(table_reference table, segment_index_t s)
	    : my_table(&table), my_seg(s)
	{
		assert(my_seg < traits_type::number_of_segments);
	}

	/** Copy constructor. */
	segment_facade_impl(const segment_facade_impl &src)
	    : my_table(src.my_table), my_seg(src.my_seg)
	{
	}

	segment_facade_impl(segment_facade_impl &&src) = default;

	/** Copy assignment. */
	segment_facade_impl &
	operator=(const segment_facade_impl &src)
	{
		my_table = src.my_table;
		my_seg = src.my_seg;
		return *this;
	}

	/** Move assignment. */
	segment_facade_impl &
	operator=(segment_facade_impl &&src)
	{
		my_table = src.my_table;
		my_seg = src.my_seg;
		return *this;
	}
	/**
	 * Accesses i-th element of the segment.
	 */
	bucket_type &operator[](size_type i) const
	{
		assert(i < traits_type::segment_size(my_seg));

		segment_index_t table_block = first_block_in_segment(my_seg);
		size_type b_size = block_size(table_block);

		table_block += i / b_size;
		i = i % b_size;

		return (*my_table)[table_block]
				  [static_cast<std::ptrdiff_t>(i)];
	}
	segment_facade_impl &
	operator++()
	{
		++my_seg;
		return *this;
	}

	segment_facade_impl
	operator++(int)
	{
		segment_facade_impl tmp = *this;
		++(*this);
		return tmp;
	}

	segment_facade_impl &
	operator--()
	{
		--my_seg;
		return *this;
	}

	segment_facade_impl
	operator--(int)
	{
		segment_facade_impl tmp = *this;
		--(*this);
		return tmp;
	}

	segment_facade_impl &
	operator+=(segment_index_t off)
	{
		my_seg += off;
		return *this;
	}

	segment_facade_impl &
	operator-=(segment_index_t off)
	{
		my_seg -= off;
		return *this;
	}

	segment_facade_impl
	operator+(segment_index_t off)
	{
		return segment_facade_impl(*(this->my_table), my_seg + off);
	}

	segment_facade_impl
	operator-(segment_index_t off)
	{
		return segment_facade_impl(*(this->my_table), my_seg - off);
	}
	/** Allocates new segment. */
	void
	enable(pool_base &pop)
	{
		assert(my_seg >= embedded_segments);

		if (my_seg < first_block) {
			enable_first_block(pop);
		} else {
			enable_big_segment(pop);
		}
	}
	/** Deallocates the segment. */
	void
	disable()
	{
		assert(my_seg >= embedded_segments);

		if (my_seg < first_block) {
			if (my_seg == embedded_segments) {
				size_type sz = segment_size(first_block) -
					embedded_buckets;
				delete_persistent<bucket_type[]>(
					(*my_table)[my_seg], sz);
			}
			(*my_table)[my_seg] = nullptr;
		} else {
			block_range blocks = segment_blocks(my_seg);

			for (segment_index_t b = blocks.first;
			     b < blocks.second; ++b) {
				if ((*my_table)[b] != nullptr) {
					delete_persistent<bucket_type[]>(
						(*my_table)[b], block_size(b));
					(*my_table)[b] = nullptr;
				}
			}
		}
	}

	/** @returns size of the segment. */
	constexpr size_type
	size() const
	{
		return segment_size(my_seg ? my_seg : 1);
	}
	/**
	 * Checks if the segment is valid, i.e. all of its blocks are
	 * allocated.
	 */
	bool
	is_valid() const
	{
		block_range blocks = segment_blocks(my_seg);

		for (segment_index_t b = blocks.first; b < blocks.second; ++b)
			if ((*my_table)[b] == nullptr)
				return false;

		return true;
	}
private:
	using block_range = std::pair<segment_index_t, segment_index_t>;

	/** @returns blocks [begin, end) for corresponding segment. */
	static block_range
	segment_blocks(segment_index_t seg)
	{
		segment_index_t begin = first_block_in_segment(seg);

		return block_range(begin, begin + blocks_in_segment(seg));
	}
	void
	enable_first_block(pool_base &pop)
	{
		assert(my_seg == embedded_segments);
		{
			transaction::manual tx(pop);

			size_type sz =
				segment_size(first_block) - embedded_buckets;
			(*my_table)[my_seg] =
				make_persistent<bucket_type[]>(sz);

			persistent_ptr<bucket_type> base =
				(*my_table)[embedded_segments].raw();

			for (segment_index_t s = my_seg + 1; s < first_block;
			     ++s) {
				std::ptrdiff_t off =
					static_cast<std::ptrdiff_t>(
						segment_base(s) -
						segment_base(my_seg));

				(*my_table)[s] = (base + off).raw();
			}

			transaction::commit();
		}
	}
	void
	enable_big_segment(pool_base &pop)
	{
		block_range blocks = segment_blocks(my_seg);
		{
			transaction::manual tx(pop);

			for (segment_index_t b = blocks.first;
			     b < blocks.second; ++b) {
				assert((*my_table)[b] == nullptr);
				(*my_table)[b] =
					make_persistent<bucket_type[]>(
						block_size(b));
			}

			transaction::commit();
		}
	}
	/** Pointer to the table of blocks. */
	table_pointer my_table;

	/** Segment index. */
	segment_index_t my_seg;
}; /* class segment_facade_impl */
/**
 * Base class of concurrent_hash_map.
 * Implements logic not dependent on the Key/Value types.
 */
template <typename Key, typename T, typename MutexType,
	  typename ScopedLockType>
class hash_map_base {
public:
	using mutex_t = MutexType;
	using scoped_t = ScopedLockType;

	/** Size type. */
	using size_type = size_t;

	/** Type of a hash code. */
	using hashcode_type = size_t;

	/** Node base type. */
	using node = hash_map_node<Key, T, mutex_t, scoped_t>;

	/** Node base pointer. */
	using node_ptr_t = detail::persistent_pool_ptr<node>;
	/** Bucket type. */
	struct bucket {
		using mutex_t = MutexType;
		using scoped_t = ScopedLockType;

		/** Bucket mutex. */
		mutex_t mutex;

		/** Atomic flag to indicate if bucket was rehashed. */
		p<std::atomic<uint64_t>> rehashed;

		/** List of the nodes stored in the bucket. */
		node_ptr_t node_list;

		/** Default constructor. */
		bucket() : node_list(nullptr)
		{
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
			VALGRIND_HG_DISABLE_CHECKING(&rehashed,
						     sizeof(rehashed));
#endif
			rehashed.get_rw() = false;
		}

		/** @returns true if bucket is rehashed and ready to use. */
		bool
		is_rehashed(std::memory_order order)
		{
			return rehashed.get_ro().load(order);
		}

		/** Set rehashed flag. */
		void
		set_rehashed(std::memory_order order)
		{
			rehashed.get_rw().store(true, order);
		}

		bucket(const bucket &) = delete;
		bucket &operator=(const bucket &) = delete;
	}; /* struct bucket */
	/** Segment traits. */
	using segment_traits_t = segment_traits<bucket>;

	/** Segment index type. */
	using segment_index_t = typename segment_traits_t::segment_index_t;

	/** Count of buckets in the embedded segments. */
	static const size_type embedded_buckets =
		segment_traits_t::embedded_buckets;

	/** Count of segments in the first block. */
	static const size_type first_block = segment_traits_t::first_block;

	/** Size of the block table. */
	constexpr static size_type block_table_size =
		segment_traits_t::number_of_blocks();

	/** Segment pointer. */
	using segment_ptr_t = persistent_ptr<bucket[]>;

	/** Bucket pointer. */
	using bucket_ptr_t = persistent_ptr<bucket>;

	/** Block pointers table type. */
	using blocks_table_t = segment_ptr_t[block_table_size];

	/** Segment mutex type. */
	using segment_enable_mutex_t = pmem::obj::mutex;

	/** Data specific for every thread. */
	struct tls_data_t {
		p<int64_t> size_diff = 0;
		std::aligned_storage<56, 8> padding;
	};

	using tls_t = detail::enumerable_thread_specific<tls_data_t>;

	enum feature_flags : uint32_t { FEATURE_CONSISTENT_SIZE = 1 };
	/** Compat and incompat features of a layout. */
	struct features {
		p<uint32_t> compat;
		p<uint32_t> incompat;
	};

	/** ID of the persistent memory pool where the hash map resides. */
	p<uint64_t> my_pool_uuid;

	/** Specifies features of a hashmap, used to check compatibility. */
	features layout_features;

	/** Reserved space; my_mask is kept in volatile memory. */
	std::aligned_storage<sizeof(size_t), sizeof(size_t)>::type
		my_mask_reserved;

	/** Hash mask = sum of allocated segment sizes - 1. */
	std::atomic<hashcode_type> my_mask;

	/** Size of value (key and value pair) stored in the pool. */
	std::size_t value_size;

	/** Padding to the end of cacheline. */
	std::aligned_storage<24, 8>::type padding1;

	/**
	 * Segment pointers table. Also prevents false sharing between
	 * my_mask and my_size.
	 */
	blocks_table_t my_table;

	/** Size of the container in terms of amount of elements. */
	std::atomic<size_type> my_size;

	/** Padding to the end of cacheline. */
	std::aligned_storage<24, 8>::type padding2;

	/** Thread specific data. */
	persistent_ptr<tls_t> tls_ptr;

	/**
	 * Size of the map stored on the last graceful shutdown; only used
	 * when FEATURE_CONSISTENT_SIZE is not set.
	 */
	p<size_t> on_init_size;

	/** Reserved for future use. */
	std::aligned_storage<40, 8>::type reserved;

	/** Segment mutex used to enable a new segment. */
	segment_enable_mutex_t my_segment_enable_mutex;

	/** Buckets of the embedded segments, indexed by hashcode. */
	bucket my_embedded_segment[embedded_buckets];
	/** @returns features supported by this layout. */
	static constexpr features
	header_features()
	{
		return {FEATURE_CONSISTENT_SIZE, 0};
	}

	const std::atomic<hashcode_type> &
	mask() const noexcept
	{
		return my_mask;
	}

	std::atomic<hashcode_type> &
	mask() noexcept
	{
		return my_mask;
	}

	size_t
	size() const
	{
		return my_size.load(std::memory_order_relaxed);
	}
	p<int64_t> &
	thread_size_diff()
	{
		assert(this->tls_ptr != nullptr);
		return this->tls_ptr->local().size_diff;
	}
	/** Process any information which was saved to tls during a previous
	 * run. */
	void
	tls_restore()
	{
		assert(this->tls_ptr != nullptr);

		pool_base pop = pool_base{pmemobj_pool_by_ptr(this)};

		int64_t last_run_size = 0;
		for (auto &data : *tls_ptr)
			last_run_size += data.size_diff;

		/* Make sure that on_init_size + last_run_size >= 0 */
		assert(last_run_size >= 0 ||
		       static_cast<int64_t>(
			       static_cast<size_t>(last_run_size) +
			       on_init_size) >= 0);

		transaction::run(pop, [&] {
			on_init_size +=
				static_cast<size_t>(last_run_size);
			tls_ptr->clear();
		});

		this->my_size = on_init_size;
	}
	/** Const segment facade type. */
	using const_segment_facade_t =
		segment_facade_impl<blocks_table_t, segment_traits_t, true>;

	/** Non-const segment facade type. */
	using segment_facade_t =
		segment_facade_impl<blocks_table_t, segment_traits_t, false>;

	/** Default constructor. */
	hash_map_base()
	{
		static_assert(
			sizeof(size_type) == sizeof(std::atomic<size_type>),
			"std::atomic should have the same layout as underlying integral type");
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
#endif
		layout_features = {0, 0};

		PMEMoid oid = pmemobj_oid(this);

		assert(!OID_IS_NULL(oid));

		my_pool_uuid = oid.pool_uuid_lo;

		pool_base pop = get_pool_base();

		/* Enable the embedded segments. */
		for (size_type i = 0; i < segment_traits_t::embedded_segments;
		     ++i) {
			my_table[i] = pmemobj_oid(
				my_embedded_segment +
				segment_traits_t::segment_base(i));
			segment_facade_t seg(my_table, i);
			mark_rehashed<false>(pop, seg);
		}

		on_init_size = 0;
		value_size = 0;

		this->tls_ptr = nullptr;
	}
	/*
	 * Should be called before the concurrent_hash_map destructor is
	 * called, otherwise the program can terminate if an exception occurs
	 * while freeing memory inside the dtor.
	 */
	void
	free_tls()
	{
		auto pop = get_pool_base();

		if ((layout_features.compat & FEATURE_CONSISTENT_SIZE) &&
		    tls_ptr) {
			transaction::run(pop, [&] {
				delete_persistent<tls_t>(tls_ptr);
				tls_ptr = nullptr;
			});
		}
	}
	/** Re-calculate mask value on each process restart. */
	void
	calculate_mask()
	{
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
		VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
#endif
#if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED
		VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_size, sizeof(my_size));
		VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_mask, sizeof(my_mask));
#endif
		hashcode_type m = embedded_buckets - 1;

		const_segment_facade_t segment(
			my_table, segment_traits_t::embedded_segments);

		while (segment.is_valid()) {
			m += segment.size();
			++segment;
		}

		mask().store(m, std::memory_order_relaxed);
	}
	/** Initialize buckets in the new segment. */
	template <bool Flush = true>
	void
	mark_rehashed(pool_base &pop, segment_facade_t &segment)
	{
		for (size_type i = 0; i < segment.size(); ++i) {
			bucket *b = &(segment[i]);

			assert_not_locked<mutex_t, scoped_t>(b->mutex);

			b->set_rehashed(std::memory_order_relaxed);
		}

		if (Flush) {
			/* Flush in a separate loop to avoid read-back of
			 * flushed data. */
			for (size_type i = 0; i < segment.size(); ++i) {
				bucket *b = &(segment[i]);
				pop.flush(b->rehashed);
			}

			pop.drain();
		}
	}
	/** Enable new segment in the hashmap. */
	void
	enable_segment(segment_index_t k, bool is_initial = false)
	{
		assert(k);

		pool_base pop = get_pool_base();
		size_type sz;

		if (k >= first_block) {
			segment_facade_t new_segment(my_table, k);

			sz = new_segment.size();
			if (!new_segment.is_valid())
				new_segment.enable(pop);

			if (is_initial)
				mark_rehashed(pop, new_segment);

			/* double it to get entire capacity of the container */
			sz <<= 1;
		} else {
			/* the first block */
			assert(k == segment_traits_t::embedded_segments);

			for (segment_index_t i = k; i < first_block; ++i) {
				segment_facade_t new_segment(my_table, i);

				if (!new_segment.is_valid())
					new_segment.enable(pop);

				if (is_initial)
					mark_rehashed(pop, new_segment);
			}

			sz = segment_traits_t::segment_size(first_block);
		}
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_BEFORE(&my_mask);
#endif
		mask().store(sz - 1, std::memory_order_release);
	}
	/** @returns bucket with corresponding hashcode. */
	bucket *
	get_bucket(hashcode_type h) const
	{
		segment_index_t s = segment_traits_t::segment_index_of(h);

		h -= segment_traits_t::segment_base(s);

		const_segment_facade_t segment(my_table, s);

		assert(segment.is_valid());

		return &(segment[h]);
	}
	/** Check for mask race. */
	inline bool
	check_mask_race(hashcode_type h, hashcode_type &m) const
	{
		hashcode_type m_now, m_old = m;

		m_now = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

		if (m_old != m_now)
			return check_rehashing_collision(h, m_old, m = m_now);

		return false;
	}
	/** Process mask race, check for rehashing collision. */
	bool
	check_rehashing_collision(hashcode_type h, hashcode_type m_old,
				  hashcode_type m) const
	{
		assert(m_old != m);

		if ((h & m_old) != (h & m)) {
			/* mask changed for this hashcode, rare event:
			 * the condition above proves that 'h' has some other
			 * bits set beside 'm_old'; find the next applicable
			 * mask after m_old */
			for (++m_old; !(h & m_old); m_old <<= 1)
				;

			m_old = (m_old << 1) - 1; /* get full mask from a bit */

			assert((m_old & (m_old + 1)) == 0 && m_old <= m);

			/* check whether it is rehashing/ed */
			bucket *b = get_bucket(h & m_old);
			return b->is_rehashed(std::memory_order_acquire);
		}

		return false;
	}
	/** Insert a node into a bucket. Must be called inside a transaction. */
	template <typename Node, typename... Args>
	void
	insert_new_node_internal(bucket *b,
				 detail::persistent_pool_ptr<Node> &new_node,
				 Args &&... args)
	{
		assert(pmemobj_tx_stage() == TX_STAGE_WORK);

		new_node = pmem::obj::make_persistent<Node>(
			b->node_list, std::forward<Args>(args)...);
		b->node_list = new_node; /* bucket is locked */
	}
	/** Insert a node into a bucket and update the size counters. */
	template <typename Node, typename... Args>
	size_type
	insert_new_node(bucket *b, detail::persistent_pool_ptr<Node> &new_node,
			Args &&... args)
	{
		pool_base pop = get_pool_base();

		/*
		 * A transaction can already be open; in that case bump
		 * on_init_size inside it, otherwise update the thread-local
		 * size diff in a new transaction.
		 */
		if (pmemobj_tx_stage() == TX_STAGE_WORK) {
			insert_new_node_internal(b, new_node,
						 std::forward<Args>(args)...);
			this->on_init_size++;
		} else {
			auto &size_diff = thread_size_diff();

			pmem::obj::transaction::run(pop, [&] {
				insert_new_node_internal(
					b, new_node,
					std::forward<Args>(args)...);
				size_diff++;
			});
		}

		/* Increment the volatile size */
		return ++(this->my_size);
	}
	/**
	 * Checks load factor and decides if a new segment should be
	 * allocated.
	 * @return true if a new segment was allocated, false otherwise.
	 */
	bool
	check_growth(hashcode_type m, size_type sz)
	{
		if (sz >= m) {
			segment_index_t new_seg =
				static_cast<segment_index_t>(detail::Log2(
					m + 1)); /* optimized segment_index_of */

			assert(segment_facade_t(my_table, new_seg - 1)
				       .is_valid());

			std::unique_lock<segment_enable_mutex_t> lock(
				my_segment_enable_mutex, std::try_to_lock);

			if (lock) {
				if (mask().load(std::memory_order_relaxed) ==
				    m) {
					/* Otherwise, another thread enabled
					 * this segment. */
					enable_segment(new_seg);

					return true;
				}
			}
		}

		return false;
	}
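	/*
	 * Worked example (illustrative): with mask m == 1023 the table spans
	 * 1024 buckets. Once sz reaches 1023, new_seg == Log2(1024) == 10,
	 * so segment 10 (buckets 1024..2047) gets enabled and the mask
	 * becomes 2047. The try_to_lock ensures only one thread grows the
	 * table; the others simply continue.
	 */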
	/** Prepare enough segments for the given number of buckets. */
	void
	reserve(size_type buckets)
	{
		if (buckets == 0)
			return;

		--buckets;

		bool is_initial = this->size() == 0;

		for (size_type m = mask(); buckets > m; m = mask())
			enable_segment(
				segment_traits_t::segment_index_of(m + 1),
				is_initial);
	}
	/**
	 * Swap hash_map_base.
	 * @throws std::transaction_error in case of PMDK transaction failure.
	 */
	void
	internal_swap(hash_map_base<Key, T, mutex_t, scoped_t> &table)
	{
		pool_base p = get_pool_base();
		{
			transaction::manual tx(p);

			this->my_pool_uuid.swap(table.my_pool_uuid);

			/*
			 * As internal_swap can only be called from one
			 * thread, and there can be an outer transaction, we
			 * must make sure that mask and size changes are
			 * transactional.
			 */
			transaction::snapshot((size_t *)&this->my_mask);
			transaction::snapshot((size_t *)&this->my_size);

			this->mask() = table.mask().exchange(
				this->mask(), std::memory_order_relaxed);

			this->my_size = table.my_size.exchange(
				this->my_size, std::memory_order_relaxed);

			/* Swap consistent size */
			std::swap(this->tls_ptr, table.tls_ptr);

			for (size_type i = 0; i < embedded_buckets; ++i)
				this->my_embedded_segment[i].node_list.swap(
					table.my_embedded_segment[i].node_list);

			for (size_type i = segment_traits_t::embedded_segments;
			     i < block_table_size; ++i)
				this->my_table[i].swap(table.my_table[i]);

			transaction::commit();
		}
	}
	/** @returns pool_base object where the hashmap resides. */
	pool_base
	get_pool_base()
	{
		PMEMobjpool *pop =
			pmemobj_pool_by_oid(PMEMoid{my_pool_uuid, 0});

		return pool_base(pop);
	}
}; /* class hash_map_base */
/**
 * Forward iterator over concurrent_hash_map.
 */
template <typename Container, bool is_const>
class hash_map_iterator {
public:
	using iterator_category = std::forward_iterator_tag;
	using difference_type = ptrdiff_t;
	using map_type = Container;
	using value_type = typename map_type::value_type;
	using node = typename map_type::node;
	using bucket = typename map_type::bucket;
	using map_ptr = typename std::conditional<is_const, const map_type *,
						  map_type *>::type;
	using reference =
		typename std::conditional<is_const,
					  typename map_type::const_reference,
					  typename map_type::reference>::type;
	using pointer =
		typename std::conditional<is_const,
					  typename map_type::const_pointer,
					  typename map_type::pointer>::type;
private:
	template <typename C, bool M, bool U>
	friend bool operator==(const hash_map_iterator<C, M> &i,
			       const hash_map_iterator<C, U> &j);

	template <typename C, bool M, bool U>
	friend bool operator!=(const hash_map_iterator<C, M> &i,
			       const hash_map_iterator<C, U> &j);

	friend class hash_map_iterator<map_type, true>;

#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
	template <typename Key, typename T, typename Hash, typename KeyEqual,
		  typename MutexType, typename ScopedLockType>
	friend class ::pmem::obj::concurrent_hash_map;
#endif
	hash_map_iterator(map_ptr map, size_t index)
	    : my_map(map), my_index(index), my_bucket(nullptr), my_node(nullptr)
	{
		if (my_index <= my_map->mask()) {
			bucket_accessor acc(my_map, my_index);
			my_bucket = acc.get();
			my_node = static_cast<node *>(
				my_bucket->node_list.get(my_map->my_pool_uuid));

			if (!my_node)
				advance_to_next_bucket();
		}
	}
public:
	/** Construct undefined iterator. */
	hash_map_iterator() = default;

	/** Copy constructor. */
	hash_map_iterator(const hash_map_iterator &other)
	    : my_map(other.my_map),
	      my_index(other.my_index),
	      my_bucket(other.my_bucket),
	      my_node(other.my_node)
	{
	}

	/** Copy constructor for const iterator from non-const iterator. */
	template <typename U = void,
		  typename = typename std::enable_if<is_const, U>::type>
	hash_map_iterator(const hash_map_iterator<map_type, false> &other)
	    : my_map(other.my_map),
	      my_index(other.my_index),
	      my_bucket(other.my_bucket),
	      my_node(other.my_node)
	{
	}

	hash_map_iterator &operator=(const hash_map_iterator &it) = default;
	/** Indirection (dereference). */
	reference operator*() const
	{
		assert(my_node);
		return my_node->item;
	}

	/** Member access. */
	pointer operator->() const
	{
		return &operator*();
	}

	/** Prefix increment. */
	hash_map_iterator &
	operator++()
	{
		my_node = static_cast<node *>(
			my_node->next.get((my_map->my_pool_uuid)));

		if (!my_node)
			advance_to_next_bucket();

		return *this;
	}

	/** Postfix increment. */
	hash_map_iterator
	operator++(int)
	{
		hash_map_iterator old(*this);
		operator++();
		return old;
	}
private:
	/** Concurrent_hash_map over which we are iterating. */
	map_ptr my_map = nullptr;

	/** Bucket index for the current item. */
	size_t my_index = 0;

	/** Pointer to the current bucket. */
	bucket *my_bucket = nullptr;

	/** Pointer to the node that owns the current item. */
	node *my_node = nullptr;
	class bucket_accessor {
	public:
		bucket_accessor(map_ptr m, size_t index)
		{
			my_bucket = m->get_bucket(index);
		}

		bucket *
		get() const
		{
			return my_bucket;
		}

	private:
		bucket *my_bucket;
	};
	void
	advance_to_next_bucket()
	{
		size_t k = my_index + 1;

		assert(my_bucket);

		while (k <= my_map->mask()) {
			bucket_accessor acc(my_map, k);
			my_bucket = acc.get();

			if (my_bucket->node_list) {
				my_node = static_cast<node *>(
					my_bucket->node_list.get(
						my_map->my_pool_uuid));

				my_index = k;

				return;
			}

			++k;
		}

		my_bucket = nullptr;
		my_node = nullptr;
		my_index = k;
	}
}; /* class hash_map_iterator */
template <typename Container, bool M, bool U>
bool
operator==(const hash_map_iterator<Container, M> &i,
	   const hash_map_iterator<Container, U> &j)
{
	return i.my_node == j.my_node && i.my_map == j.my_map;
}

template <typename Container, bool M, bool U>
bool
operator!=(const hash_map_iterator<Container, M> &i,
	   const hash_map_iterator<Container, U> &j)
{
	return i.my_node != j.my_node || i.my_map != j.my_map;
}
} /* namespace concurrent_hash_map_internal */
/**
 * Persistent memory aware implementation of Intel TBB concurrent_hash_map.
 */
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
class concurrent_hash_map
    : protected concurrent_hash_map_internal::hash_map_base<Key, T, MutexType,
							     ScopedLockType> {
	template <typename Container, bool is_const>
	friend class concurrent_hash_map_internal::hash_map_iterator;
public:
	using size_type = typename concurrent_hash_map_internal::hash_map_base<
		Key, T, MutexType, ScopedLockType>::size_type;
	using hashcode_type =
		typename concurrent_hash_map_internal::hash_map_base<
			Key, T, MutexType, ScopedLockType>::hashcode_type;
	using key_type = Key;
	using mapped_type = T;
	using value_type = typename concurrent_hash_map_internal::hash_map_base<
		Key, T, MutexType, ScopedLockType>::node::value_type;
	using difference_type = ptrdiff_t;
	using pointer = value_type *;
	using const_pointer = const value_type *;
	using reference = value_type &;
	using const_reference = const value_type &;
	using iterator = concurrent_hash_map_internal::hash_map_iterator<
		concurrent_hash_map, false>;
	using const_iterator = concurrent_hash_map_internal::hash_map_iterator<
		concurrent_hash_map, true>;
	using hasher = Hash;
	using key_equal = typename concurrent_hash_map_internal::key_equal_type<
		Hash, KeyEqual>::type;

protected:
	using mutex_t = MutexType;
	using scoped_t = ScopedLockType;
	/*
	 * Explicitly use methods and types from the template base class.
	 */
	using hash_map_base =
		concurrent_hash_map_internal::hash_map_base<Key, T, mutex_t,
							    scoped_t>;
	using hash_map_base::calculate_mask;
	using hash_map_base::check_growth;
	using hash_map_base::check_mask_race;
	using hash_map_base::embedded_buckets;
	using hash_map_base::FEATURE_CONSISTENT_SIZE;
	using hash_map_base::get_bucket;
	using hash_map_base::get_pool_base;
	using hash_map_base::header_features;
	using hash_map_base::insert_new_node;
	using hash_map_base::internal_swap;
	using hash_map_base::layout_features;
	using hash_map_base::mask;
	using hash_map_base::reserve;
	using tls_t = typename hash_map_base::tls_t;
	using node = typename hash_map_base::node;
	using node_mutex_t = typename node::mutex_t;
	using node_ptr_t = typename hash_map_base::node_ptr_t;
	using bucket = typename hash_map_base::bucket;
	using bucket_lock_type = typename bucket::scoped_t;
	using segment_index_t = typename hash_map_base::segment_index_t;
	using segment_traits_t = typename hash_map_base::segment_traits_t;
	using segment_facade_t = typename hash_map_base::segment_facade_t;
	using scoped_lock_traits_type =
		concurrent_hash_map_internal::scoped_lock_traits<scoped_t>;

	friend class const_accessor;
	using persistent_node_ptr_t = detail::persistent_pool_ptr<node>;
	void
	delete_node(const node_ptr_t &n)
	{
		delete_persistent<node>(
			detail::static_persistent_pool_pointer_cast<node>(n)
				.get_persistent_ptr(this->my_pool_uuid));
	}

	template <typename K>
	persistent_node_ptr_t
	search_bucket(const K &key, bucket *b) const
	{
		assert(b->is_rehashed(std::memory_order_relaxed));

		persistent_node_ptr_t n =
			detail::static_persistent_pool_pointer_cast<node>(
				b->node_list);

		while (n &&
		       !key_equal{}(key,
				    n.get(this->my_pool_uuid)->item.first)) {
			n = detail::static_persistent_pool_pointer_cast<node>(
				n.get(this->my_pool_uuid)->next);
		}

		return n;
	}
	/**
	 * Bucket accessor is to find, rehash, acquire a lock, and access a
	 * bucket.
	 */
	class bucket_accessor : public bucket_lock_type {
		bucket *my_b;

	public:
		bucket_accessor(bucket_accessor &&b) noexcept : my_b(b.my_b)
		{
			bucket_lock_type::mutex = b.bucket_lock_type::mutex;
			bucket_lock_type::is_writer =
				b.bucket_lock_type::is_writer;
			b.my_b = nullptr;
			b.bucket_lock_type::mutex = nullptr;
			b.bucket_lock_type::is_writer = false;
		}

		bucket_accessor(concurrent_hash_map *base,
				const hashcode_type h, bool writer = false)
		{
			acquire(base, h, writer);
		}

		/**
		 * Find a bucket by masked hashcode, optionally rehash it and
		 * acquire the lock.
		 */
		void
		acquire(concurrent_hash_map *base, const hashcode_type h,
			bool writer = false)
		{
			my_b = base->get_bucket(h);

			if (my_b->is_rehashed(std::memory_order_acquire) ==
				    false &&
			    bucket_lock_type::try_acquire(this->my_b->mutex,
							  /*write=*/true)) {
				if (my_b->is_rehashed(
					    std::memory_order_relaxed) ==
				    false) {
					/* recursive rehashing */
					base->rehash_bucket<false>(my_b, h);
				}
			} else {
				bucket_lock_type::acquire(my_b->mutex, writer);
			}

			assert(my_b->is_rehashed(std::memory_order_relaxed));
		}

		/** Check whether bucket is locked for write. */
		bool
		is_writer() const
		{
			return bucket_lock_type::is_writer;
		}

		/** @returns pointer to the underlying bucket. */
		bucket *
		get() const
		{
			return my_b;
		}

		bucket *operator->() const
		{
			return this->get();
		}
	};
	/**
	 * Serial bucket accessor used to access buckets in serial operations.
	 */
	class serial_bucket_accessor {
		bucket *my_b;

	public:
		serial_bucket_accessor(concurrent_hash_map *base,
				       const hashcode_type h,
				       bool writer = false)
		{
			acquire(base, h, writer);
		}

		/* Find a bucket by masked hashcode, optionally rehash. */
		void
		acquire(concurrent_hash_map *base, const hashcode_type h,
			bool writer = false)
		{
			my_b = base->get_bucket(h);

			if (my_b->is_rehashed(std::memory_order_relaxed) ==
			    false) {
				/* recursive rehashing */
				base->rehash_bucket<true>(my_b, h);
			}

			assert(my_b->is_rehashed(std::memory_order_relaxed));
		}

		/* Only used by serial code; always behaves as a writer. */
		bool
		is_writer() const
		{
			return true;
		}

		/** @returns pointer to the underlying bucket. */
		bucket *
		get() const
		{
			return my_b;
		}

		bucket *operator->() const
		{
			return this->get();
		}
	};
	hashcode_type
	get_hash_code(node_ptr_t &n)
	{
		return hasher{}(
			detail::static_persistent_pool_pointer_cast<node>(n)(
				this->my_pool_uuid)
				->item.first);
	}
	template <bool serial>
	void
	rehash_bucket(bucket *b_new, const hashcode_type h)
	{
		using accessor_type = typename std::conditional<
			serial, serial_bucket_accessor, bucket_accessor>::type;

		using scoped_lock_traits_type =
			concurrent_hash_map_internal::scoped_lock_traits<
				accessor_type>;

		/* The first two buckets are always rehashed. */
		assert(h > 1);

		pool_base pop = get_pool_base();
		node_ptr_t *p_new = &(b_new->node_list);

		/* This condition is only true when there was a failure just
		 * before setting the rehashed flag. */
		if (*p_new != nullptr) {
			assert(!b_new->is_rehashed(std::memory_order_relaxed));

			b_new->set_rehashed(std::memory_order_relaxed);
			pop.persist(b_new->rehashed);

			return;
		}

		/* get parent mask from the topmost bit */
		hashcode_type mask = (1u << detail::Log2(h)) - 1;
		assert((h & mask) < h);
		accessor_type b_old(
			this, h & mask,
			scoped_lock_traits_type::initial_rw_state(true));

		/* get full mask for the new bucket */
		mask = (mask << 1) | 1;
		assert((mask & (mask + 1)) == 0 && (h & mask) == h);

	restart:
		for (node_ptr_t *p_old = &(b_old->node_list), n = *p_old; n;
		     n = *p_old) {
			hashcode_type c = get_hash_code(n);
#ifndef NDEBUG
			hashcode_type bmask = h & (mask >> 1);

			bmask = bmask == 0
				? 1 /* minimal mask of parent bucket */
				: (1u << (detail::Log2(bmask) + 1)) - 1;

			assert((c & bmask) == (h & bmask));
#endif

			if ((c & mask) == h) {
				if (!b_old.is_writer() &&
				    !scoped_lock_traits_type::
					    upgrade_to_writer(b_old)) {
					goto restart;
					/* node ptr can be invalid due to
					 * concurrent erase */
				}

				/* Add to b_new */
				*p_new = n;

				/* exclude from b_old */
				*p_old = n(this->my_pool_uuid)->next;

				p_new = &(n(this->my_pool_uuid)->next);
			} else {
				/* iterate to the next item */
				p_old = &(n(this->my_pool_uuid)->next);
			}
		}

		*p_new = nullptr;

		/* mark rehashed */
		b_new->set_rehashed(std::memory_order_release);
		pop.persist(b_new->rehashed);
	}
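	/*
	 * Worked example (illustrative): for h == 12 (0b1100) the parent mask
	 * is (1u << Log2(12)) - 1 == 7, so the parent is bucket 12 & 7 == 4.
	 * With the full mask ((7 << 1) | 1) == 15, every node of bucket 4
	 * whose hash code c satisfies (c & 15) == 12 is relinked into bucket
	 * 12; the rest stay in bucket 4. A crash between relinking and
	 * persisting the rehashed flag is healed by the early-return check at
	 * the top of this function.
	 */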
	void
	check_incompat_features()
	{
		if (layout_features.incompat != header_features().incompat)
			throw pmem::layout_error(
				"Incompat flags mismatch, for more details go to: https://pmem.io/pmdk/cpp_obj/ \n");

		if ((layout_features.compat & FEATURE_CONSISTENT_SIZE) &&
		    this->value_size != sizeof(value_type))
			throw pmem::layout_error(
				"Size of value_type is different than the one stored in the pool \n");
	}
public:
	class accessor;

	/**
	 * Combines data access, locking, and garbage collection.
	 */
	class const_accessor
	    : protected node::scoped_t /* which derives from no_copy */ {
		friend class concurrent_hash_map<Key, T, Hash, KeyEqual,
						 mutex_t, scoped_t>;
		friend class accessor;
		using node_ptr_t = pmem::obj::persistent_ptr<node>;
		using node::scoped_t::try_acquire;

	public:
		/** Type of value. */
		using value_type =
			const typename concurrent_hash_map::value_type;

		/** Release the accessor (and the per-element lock). */
		void
		release()
		{
			concurrent_hash_map_internal::check_outside_tx();

			if (my_node) {
				node::scoped_t::release();
				my_node = nullptr;
			}
		}

		/** @returns reference to the associated value. */
		const_reference operator*() const
		{
			assert(my_node);
			return my_node->item;
		}

		/** @returns pointer to the associated value. */
		const_pointer operator->() const
		{
			return &operator*();
		}

		/** Create empty result. */
		const_accessor()
		{
			concurrent_hash_map_internal::check_outside_tx();
		}

	protected:
		node_ptr_t my_node;
		hashcode_type my_hash;
	};

	/**
	 * Allows write access to elements; combines data access, locking,
	 * and garbage collection.
	 */
	class accessor : public const_accessor {
	public:
		/** @returns reference to the associated value. */
		reference operator*() const
		{
			assert(this->my_node);
			return this->my_node->item;
		}

		/** @returns pointer to the associated value. */
		pointer operator->() const
		{
			return &operator*();
		}
	};
	/**
	 * Copy constructor. Not thread safe.
	 */
	concurrent_hash_map(const concurrent_hash_map &table) : hash_map_base()
	{
		concurrent_hash_map_internal::check_outside_tx();

		reserve(table.size());

		internal_copy(table);
	}

	/**
	 * Construction of table with copying an iteration range. Not thread
	 * safe.
	 */
	template <typename I>
	concurrent_hash_map(I first, I last) : hash_map_base()
	{
		concurrent_hash_map_internal::check_outside_tx();

		reserve(static_cast<size_type>(std::distance(first, last)));

		internal_copy(first, last);
	}
	/**
	 * Initialize the persistent concurrent hash map after process
	 * restart. MUST be called every time after process restart.
	 * Not thread safe.
	 */
	void
	runtime_initialize()
	{
		check_incompat_features();

		calculate_mask();

		/*
		 * Handle the case where the hash map was created without
		 * FEATURE_CONSISTENT_SIZE.
		 */
		if (!(layout_features.compat & FEATURE_CONSISTENT_SIZE)) {
			auto actual_size =
				std::distance(this->begin(), this->end());
			assert(actual_size >= 0);

			this->my_size = static_cast<size_t>(actual_size);

			auto pop = get_pool_base();
			transaction::run(pop, [&] {
				this->tls_ptr = make_persistent<tls_t>();
				this->on_init_size =
					static_cast<size_t>(actual_size);
				this->value_size = sizeof(value_type);

				layout_features.compat |=
					FEATURE_CONSISTENT_SIZE;
			});
		} else {
			assert(this->tls_ptr != nullptr);
			this->tls_restore();
		}

		assert(this->size() ==
		       size_type(std::distance(this->begin(), this->end())));
	}
2193 "runtime_initialize(bool) is now deprecated, use runtime_initialize(void)")]]
void
2196 check_incompat_features();
2200 if (!graceful_shutdown) {
2202 std::distance(this->
begin(), this->
end());
2203 assert(actual_size >= 0);
2204 this->my_size =
static_cast<size_type
>(actual_size);
2206 assert(this->
size() ==
2207 size_type(std::distance(this->
begin(),
	/**
	 * Assignment. Not thread safe.
	 */
	concurrent_hash_map &
	operator=(const concurrent_hash_map &table)
	{
		if (this != &table) {
			clear();
			internal_copy(table);
		}

		return *this;
	}

	/**
	 * Rehashes and optionally resizes the whole table.
	 * Useful to optimize performance before or after concurrent
	 * operations. Not thread safe.
	 */
	void rehash(size_type n = 0);

	/**
	 * Should be called before the concurrent_hash_map destructor is
	 * called; clears the table and frees thread-local data.
	 * Not thread safe.
	 */
	void
	free_data()
	{
		if (!this->tls_ptr)
			return;

		auto pop = get_pool_base();

		{
			transaction::manual tx(pop);

			clear();

			this->free_tls();

			transaction::commit();
		}
	}
	/** @returns an iterator to the beginning. Not thread safe. */
	iterator
	begin()
	{
		return iterator(this, 0);
	}

	/** @returns an iterator to the end. Not thread safe. */
	iterator
	end()
	{
		return iterator(this, mask() + 1);
	}

	/** @returns a const iterator to the beginning. Not thread safe. */
	const_iterator
	begin() const
	{
		return const_iterator(this, 0);
	}

	/** @returns a const iterator to the end. Not thread safe. */
	const_iterator
	end() const
	{
		return const_iterator(this, mask() + 1);
	}

	/** @returns number of items in the table. */
	size_type
	size() const
	{
		return hash_map_base::size();
	}

	/** @returns true if the table is empty. */
	bool
	empty() const
	{
		return this->size() == 0;
	}

	/** @returns an upper bound on the number of elements. */
	size_type
	max_size() const
	{
		return (~size_type(0)) / sizeof(node);
	}
	/** @return count of items (0 or 1). */
	size_type
	count(const Key &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		return const_cast<concurrent_hash_map *>(this)->internal_find(
			key, nullptr, false);
	}

	/**
	 * Heterogeneous overload; participates in overload resolution only
	 * if the hasher defines transparent_key_equal.
	 */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	size_type
	count(const K &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		return const_cast<concurrent_hash_map *>(this)->internal_find(
			key, nullptr, false);
	}

	/** Find item and acquire a read lock on the item. */
	bool
	find(const_accessor &result, const Key &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return const_cast<concurrent_hash_map *>(this)->internal_find(
			key, &result, false);
	}

	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	bool
	find(const_accessor &result, const K &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return const_cast<concurrent_hash_map *>(this)->internal_find(
			key, &result, false);
	}

	/** Find item and acquire a write lock on the item. */
	bool
	find(accessor &result, const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_find(key, &result, true);
	}

	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	bool
	find(accessor &result, const K &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_find(key, &result, true);
	}
	bool
	insert(const_accessor &result, const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(key, &result, false, key);
	}

	bool
	insert(accessor &result, const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(key, &result, true, key);
	}

	bool
	insert(const_accessor &result, const value_type &value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, false, value);
	}

	bool
	insert(accessor &result, const value_type &value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, true, value);
	}

	bool
	insert(const value_type &value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(value.first, nullptr, false, value);
	}

	bool
	insert(const_accessor &result, value_type &&value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, false,
				       std::move(value));
	}

	bool
	insert(accessor &result, value_type &&value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, true,
				       std::move(value));
	}

	bool
	insert(value_type &&value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(value.first, nullptr, false,
				       std::move(value));
	}

	template <typename I>
	void
	insert(I first, I last)
	{
		concurrent_hash_map_internal::check_outside_tx();

		for (; first != last; ++first)
			insert(*first);
	}

	void
	insert(std::initializer_list<value_type> il)
	{
		concurrent_hash_map_internal::check_outside_tx();

		insert(il.begin(), il.end());
	}
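	/*
	 * Illustrative sketch: the accessor overloads above combine the
	 * insert with a per-element lock, e.g. (assuming a map_t as in the
	 * example near the forward declaration):
	 *
	 *	map_t::accessor acc;
	 *	if (map.insert(acc, key))
	 *		acc->second = initial_value; // newly inserted
	 *	else
	 *		acc->second = acc->second + 1; // present, write-locked
	 */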
	template <typename M>
	bool
	insert_or_assign(const key_type &key, M &&obj)
	{
		concurrent_hash_map_internal::check_outside_tx();

		accessor acc;
		auto result = internal_insert(key, &acc, true, key,
					      std::forward<M>(obj));

		if (!result) {
			pool_base pop = get_pool_base();
			pmem::obj::transaction::manual tx(pop);
			acc->second = std::forward<M>(obj);
			pmem::obj::transaction::commit();
		}

		return result;
	}

	template <typename M>
	bool
	insert_or_assign(key_type &&key, M &&obj)
	{
		concurrent_hash_map_internal::check_outside_tx();

		accessor acc;
		auto result = internal_insert(key, &acc, true, std::move(key),
					      std::forward<M>(obj));

		if (!result) {
			pool_base pop = get_pool_base();
			pmem::obj::transaction::manual tx(pop);
			acc->second = std::forward<M>(obj);
			pmem::obj::transaction::commit();
		}

		return result;
	}

	template <typename K, typename M,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::has_transparent_key_equal<
				  hasher>::value &&
				  std::is_constructible<key_type, K>::value,
			  K>::type>
	bool
	insert_or_assign(K &&key, M &&obj)
	{
		concurrent_hash_map_internal::check_outside_tx();

		accessor acc;
		auto result =
			internal_insert(key, &acc, true, std::forward<K>(key),
					std::forward<M>(obj));

		if (!result) {
			pool_base pop = get_pool_base();
			pmem::obj::transaction::manual tx(pop);
			acc->second = std::forward<M>(obj);
			pmem::obj::transaction::commit();
		}

		return result;
	}
	/**
	 * Remove the element with the corresponding key.
	 * @return true if the element was deleted by this call.
	 */
	bool
	erase(const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_erase(key);
	}
	/**
	 * Defragment the given (by 'start_percent' and 'amount_percent')
	 * part of the buckets of the hash map. The algorithm is
	 * opportunistic: buckets it cannot lock are skipped.
	 *
	 * @throw std::range_error if the percentage range is incorrect.
	 */
	pobj_defrag_result
	defragment(double start_percent = 0, double amount_percent = 100)
	{
		double end_percent = start_percent + amount_percent;
		if (start_percent < 0 || start_percent >= 100 ||
		    end_percent < 0 || end_percent > 100 ||
		    start_percent >= end_percent) {
			throw std::range_error("incorrect range");
		}

		size_t max_index = mask().load(std::memory_order_acquire);
		size_t start_index =
			static_cast<size_t>((start_percent * max_index) / 100);
		size_t end_index =
			static_cast<size_t>((end_percent * max_index) / 100);

		/* Make sure we do not end up with an index outside the
		 * table. */
		end_index = (std::min)(end_index, max_index);

#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

		/* Create a defrag object for elements in the current pool. */
		pmem::obj::defrag my_defrag(this->get_pool_base());
		mutex_vector mv;

		/*
		 * Locks are taken in backward order to avoid deadlocks with
		 * the rehashing of buckets; '+ 1' and '- 1' handle the
		 * 'i == 0' case.
		 */
		for (size_t i = end_index + 1; i >= start_index + 1; i--) {
			/*
			 * All locks will be unlocked automatically
			 * in the destructor of 'mv'.
			 */
			bucket *b = mv.push_and_try_lock(this, i - 1);
			if (b == nullptr)
				continue;

			defrag_save_nodes(b, my_defrag);
		}

		return my_defrag.run();
	}
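	/*
	 * Illustrative sketch: defragmentation can be run in slices so that
	 * only part of the table is write-locked at a time, e.g. ten 10%
	 * slices (values are examples):
	 *
	 *	for (int p = 0; p < 100; p += 10)
	 *		map.defragment(p, 10);
	 */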
	/** Heterogeneous erase overload. */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	bool
	erase(const K &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_erase(key);
	}
protected:
	/*
	 * Try to acquire the mutex for read or write.
	 * @return true if acquired, false otherwise.
	 */
	bool try_acquire_item(const_accessor *result, node_mutex_t &mutex,
			      bool write);
	/**
	 * Vector of locks to be released at destruction time.
	 * MutexType - type of mutex used by buckets.
	 */
	class mutex_vector {
	public:
		using mutex_t = MutexType;

		/** Save pointer to the lock in the vector and lock it. */
		bucket *
		push_and_try_lock(concurrent_hash_map *base, hashcode_type h)
		{
			vec.emplace_back(base, h, true /*writer*/);
			bucket *b = vec.back().get();

			auto node_ptr = static_cast<node *>(
				b->node_list.get(base->my_pool_uuid));

			while (node_ptr) {
				const_accessor ca;
				if (!base->try_acquire_item(&ca,
							    node_ptr->mutex,
							    /*write=*/true)) {
					vec.pop_back();
					return nullptr;
				}

				node_ptr = static_cast<node *>(
					node_ptr->next.get(
						(base->my_pool_uuid)));
			}

			return b;
		}

	private:
		std::vector<bucket_accessor> vec;
	};
	template <typename K>
	bool internal_find(const K &key, const_accessor *result, bool write);

	template <typename K, typename... Args>
	bool internal_insert(const K &key, const_accessor *result, bool write,
			     Args &&... args);
	/* Obtain pointer to node and lock bucket */
	template <bool Bucket_rw_lock, typename K>
	persistent_node_ptr_t
	get_node(const K &key, bucket_accessor &b)
	{
		/* find a node */
		auto n = search_bucket(key, b.get());

		if (!n) {
			if (Bucket_rw_lock && !b.is_writer() &&
			    !scoped_lock_traits_type::upgrade_to_writer(b)) {
				/* Rerun search_bucket, in case another thread
				 * inserted the item during the upgrade. */
				n = search_bucket(key, b.get());
				if (n) {
					/* unfortunately, it did */
					scoped_lock_traits_type::
						downgrade_to_reader(b);
					return n;
				}
			}
		}

		return n;
	}
	template <typename K>
	bool internal_erase(const K &key);

	void clear_segment(segment_index_t s);

	/** Copy "source" to *this, where *this must start out empty. */
	void internal_copy(const concurrent_hash_map &source);

	template <typename I>
	void internal_copy(I first, I last);
	/**
	 * Internal method used by defragment(). Adds the nodes of the given
	 * bucket to the defragmentation list.
	 */
	void
	defrag_save_nodes(bucket *b, pmem::obj::defrag &defrag)
	{
		auto node_ptr = static_cast<node *>(
			b->node_list.get(this->my_pool_uuid));

		while (node_ptr) {
			/* (elided in this excerpt: register the node's data
			 * with 'defrag') */

			node_ptr = static_cast<node *>(node_ptr->next.get(
				(this->my_pool_uuid)));
		}
	}
};
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::try_acquire_item(const_accessor *result,
						      node_mutex_t &mutex,
						      bool write)
{
	/* acquire the item */
	if (!result->try_acquire(mutex, write)) {
		for (detail::atomic_backoff backoff(true);;) {
			if (result->try_acquire(mutex, write))
				break;

			if (!backoff.bounded_pause())
				return false;
		}
	}

	return true;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_find(const K &key,
						   const_accessor *result,
						   bool write)
{
	assert(!result || !result->my_node);

	hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

	assert((m & (m + 1)) == 0);

	hashcode_type const h = hasher{}(key);

	persistent_node_ptr_t node;

	while (true) {
		/* get bucket and acquire the lock */
		bucket_accessor b(
			this, h & m,
			scoped_lock_traits_type::initial_rw_state(false));
		node = get_node<false>(key, b);

		if (!node) {
			/* Element was possibly relocated, try again */
			if (check_mask_race(h, m))
				continue;
			else
				return false;
		}

		/* No need to acquire the item or item acquired */
		if (!result ||
		    try_acquire_item(
			    result, node.get(this->my_pool_uuid)->mutex, write))
			break;

		/* the wait takes really long, restart the operation */
		b.release();

		std::this_thread::yield();

		m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif
	}

	if (result) {
		result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
		result->my_hash = h;
	}

	return true;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K, typename... Args>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_insert(const K &key,
						     const_accessor *result,
						     bool write,
						     Args &&... args)
{
	assert(!result || !result->my_node);

	hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

	assert((m & (m + 1)) == 0);

	hashcode_type const h = hasher{}(key);

	persistent_node_ptr_t node;
	size_t new_size = 0;
	bool inserted = false;

	while (true) {
		/* get bucket and acquire the lock */
		bucket_accessor b(
			this, h & m,
			scoped_lock_traits_type::initial_rw_state(true));
		node = get_node<true>(key, b);

		if (!node) {
			/* Element was possibly relocated, try again */
			if (check_mask_race(h, m))
				continue;

			/* insert and set flag to grow the container */
			new_size = insert_new_node(b.get(), node,
						   std::forward<Args>(args)...);
			inserted = true;
		}

		/* No need to acquire the item or item acquired */
		if (!result ||
		    try_acquire_item(
			    result, node.get(this->my_pool_uuid)->mutex, write))
			break;

		/* the wait takes really long, restart the operation */
		b.release();

		std::this_thread::yield();

		m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif
	}

	if (result) {
		result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
		result->my_hash = h;
	}

	check_growth(m, new_size);

	return inserted;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_erase(const K &key)
{
	node_ptr_t n;
	hashcode_type const h = hasher{}(key);
	hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

	pool_base pop = get_pool_base();

restart : {
	/* lock scope */
	/* get bucket */
	bucket_accessor b(this, h & m,
			  scoped_lock_traits_type::initial_rw_state(true));

search:
	node_ptr_t *p = &b->node_list;
	n = *p;

	while (n &&
	       !key_equal{}(key,
			    detail::static_persistent_pool_pointer_cast<node>(
				    n)(this->my_pool_uuid)
				    ->item.first)) {
		p = &n(this->my_pool_uuid)->next;
		n = *p;
	}

	if (!n) {
		/* not found, but the mask could have changed */
		if (check_mask_race(h, m))
			goto restart;

		return false;
	} else if (!b.is_writer() &&
		   !scoped_lock_traits_type::upgrade_to_writer(b)) {
		if (check_mask_race(h, m)) /* contended upgrade, check mask */
			goto restart;

		goto search;
	}

	persistent_ptr<node> del = n(this->my_pool_uuid);

	{
		/* We cannot remove this element immediately because other
		 * threads might work with it via accessors; wait while they
		 * use the node. */
		const_accessor acc;
		if (!try_acquire_item(&acc, del->mutex, true)) {
			/* the wait takes really long, restart the
			 * operation */
			b.release();

			std::this_thread::yield();

			m = mask().load(std::memory_order_acquire);

			goto restart;
		}
	}

	assert(pmemobj_tx_stage() == TX_STAGE_NONE);

	auto &size_diff = this->thread_size_diff();

	/* Only one thread can delete it due to the write lock on the
	 * bucket */
	transaction::run(pop, [&] {
		*p = del->next;
		delete_node(del);

		--size_diff;
	});

	--(this->my_size);
}

	return true;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::swap(
	concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
			    ScopedLockType> &table)
{
	internal_swap(table);
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::rehash(
	size_type sz)
{
	concurrent_hash_map_internal::check_outside_tx();

	reserve(sz);
	hashcode_type m = mask();

	/* only the last segment should be scanned for rehashing;
	 * 'b' is the first index of the last segment */
	hashcode_type b = (m + 1) >> 1;

	/* zero or power of 2 */
	assert((b & (b - 1)) == 0);

	for (; b <= m; ++b) {
		bucket *bp = get_bucket(b);

		concurrent_hash_map_internal::assert_not_locked<mutex_t,
								scoped_t>(
			bp->mutex);

		if (bp->is_rehashed(std::memory_order_relaxed) == false)
			rehash_bucket<true>(bp, b);
	}
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::clear()
{
	hashcode_type m = mask();

	assert((m & (m + 1)) == 0);

#ifndef NDEBUG
	/* check consistency */
	for (segment_index_t b = 0; b <= m; ++b) {
		bucket *bp = get_bucket(b);
		concurrent_hash_map_internal::assert_not_locked<mutex_t,
								scoped_t>(
			bp->mutex);
	}
#endif

	pool_base pop = get_pool_base();
	{ /* transaction scope */

		transaction::manual tx(pop);

		assert(this->tls_ptr != nullptr);
		this->tls_ptr->clear();

		this->on_init_size = 0;

		segment_index_t s = segment_traits_t::segment_index_of(m);

		assert(s + 1 == this->block_table_size ||
		       !segment_facade_t(this->my_table, s + 1).is_valid());

		do {
			clear_segment(s);
		} while (s-- > 0);

		mask().store(embedded_buckets - 1, std::memory_order_relaxed);

		this->my_size = 0;

		transaction::commit();
	}
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::clear_segment(segment_index_t s)
{
	segment_facade_t segment(this->my_table, s);

	assert(segment.is_valid());

	size_type sz = segment.size();
	for (segment_index_t i = 0; i < sz; ++i) {
		for (node_ptr_t n = segment[i].node_list; n;
		     n = segment[i].node_list) {
			segment[i].node_list = n(this->my_pool_uuid)->next;
			delete_node(n);
		}
	}

	if (s >= segment_traits_t::embedded_segments)
		segment.disable();
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_copy(const concurrent_hash_map
							   &source)
{
	auto pop = get_pool_base();

	reserve(source.size());
	internal_copy(source.begin(), source.end());
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename I>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_copy(I first, I last)
{
	hashcode_type m = mask();

	for (; first != last; ++first) {
		hashcode_type h = hasher{}(first->first);
		bucket *b = get_bucket(h & m);

		assert(b->is_rehashed(std::memory_order_relaxed));

		detail::persistent_pool_ptr<node> p;
		insert_new_node(b, p, *first);
	}
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline bool
operator==(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType> &a,
	   const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType> &b)
{
	if (a.size() != b.size())
		return false;

	typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType>::const_iterator
		i(a.begin()),
		i_end(a.end());

	typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType>::const_iterator j,
		j_end(b.end());

	for (; i != i_end; ++i) {
		j = b.equal_range(i->first).first;

		if (j == j_end || !(i->second == j->second))
			return false;
	}

	return true;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline bool
operator!=(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType> &a,
	   const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType> &b)
{
	return !(a == b);
}
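/*
 * Illustrative sketch: the free operator==/operator!= above and swap below
 * follow the usual container conventions, e.g.:
 *
 *	assert(map_a == map_a);        // element-wise, not thread safe
 *	pmem::obj::swap(map_a, map_b); // exchanges internal tables
 */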
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline void
swap(concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &a,
     concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &b)
{
	a.swap(b);
}

} /* namespace obj */
} /* namespace pmem */

#endif /* PMEMOBJ_CONCURRENT_HASH_MAP_HPP */