#ifndef PMEMOBJ_CONCURRENT_HASH_MAP_HPP
#define PMEMOBJ_CONCURRENT_HASH_MAP_HPP

#include <libpmemobj++/defrag.hpp>
#include <libpmemobj++/detail/enumerable_thread_specific.hpp>
#include <libpmemobj++/detail/pair.hpp>
#include <libpmemobj++/detail/persistent_pool_ptr.hpp>
#include <libpmemobj++/detail/template_helpers.hpp>
#include <libpmemobj++/make_persistent.hpp>
#include <libpmemobj++/mutex.hpp>
#include <libpmemobj++/p.hpp>
#include <libpmemobj++/persistent_ptr.hpp>
#include <libpmemobj++/shared_mutex.hpp>
#include <libpmemobj++/transaction.hpp>

#include <atomic>
#include <cassert>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <mutex>
#include <thread>
#include <type_traits>
#include <vector>

namespace std
{
/* Specialization of std::hash for p<T>: hash the stored value directly. */
template <typename T>
struct hash<pmem::obj::p<T>> {
    size_t
    operator()(const pmem::obj::p<T> &x) const
    {
        return hash<T>()(x.get_ro());
    }
};
} /* namespace std */

namespace pmem
{
namespace obj
{
namespace concurrent_hash_map_internal
{
/* RAII scoped lock over a shared mutex supporting shared (read) and
 * exclusive (write) acquisition. */
template <typename SharedMutexT>
class shared_mutex_scoped_lock {
    using rw_mutex_type = SharedMutexT;

public:
    shared_mutex_scoped_lock(const shared_mutex_scoped_lock &) = delete;
    shared_mutex_scoped_lock &
    operator=(const shared_mutex_scoped_lock &) = delete;

    /** Default constructor: holds no lock. */
    shared_mutex_scoped_lock() : mutex(nullptr), is_writer(false)
    {
    }

    /** Acquire lock on given mutex. */
    shared_mutex_scoped_lock(rw_mutex_type &m, bool write = true)
        : mutex(nullptr)
    {
        acquire(m, write);
    }

    /** Release the lock, if held. */
    ~shared_mutex_scoped_lock()
    {
        if (mutex)
            release();
    }

    /** Acquire lock on given mutex. */
    void
    acquire(rw_mutex_type &m, bool write = true)
    {
        is_writer = write;
        mutex = &m;
        if (write)
            mutex->lock();
        else
            mutex->lock_shared();
    }

    /** Release the currently held lock. */
    void
    release()
    {
        assert(mutex);
        rw_mutex_type *m = mutex;
        mutex = nullptr;
        if (is_writer)
            m->unlock();
        else
            m->unlock_shared();
    }

    /** Try to acquire the lock. @return true on success. */
    bool
    try_acquire(rw_mutex_type &m, bool write = true)
    {
        assert(!mutex);
        bool result = write ? m.try_lock() : m.try_lock_shared();
        if (result) {
            mutex = &m;
            is_writer = write;
        }
        return result;
    }

protected:
    /** Pointer to the held mutex, or nullptr if no lock is held. */
    rw_mutex_type *mutex;

    /** True if the held lock is a writer lock. */
    bool is_writer;
}; /* class shared_mutex_scoped_lock */
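/*
 * Illustrative sketch (not part of the original header): the wrapper gives a
 * pmem::obj::shared_mutex the scoped-lock interface the map expects, e.g.
 *
 *   pmem::obj::shared_mutex mtx;
 *   {
 *       shared_mutex_scoped_lock<pmem::obj::shared_mutex> lock(mtx, false);
 *       // shared (read) critical section; released when `lock` goes out of
 *       // scope
 *   }
 */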
template <typename ScopedLockType>
using scoped_lock_upgrade_to_writer =
    decltype(std::declval<ScopedLockType>().upgrade_to_writer());

template <typename ScopedLockType>
using scoped_lock_has_upgrade_to_writer =
    detail::supports<ScopedLockType, scoped_lock_upgrade_to_writer>;

template <typename ScopedLockType>
using scoped_lock_downgrade_to_reader =
    decltype(std::declval<ScopedLockType>().downgrade_to_reader());

template <typename ScopedLockType>
using scoped_lock_has_downgrade_to_reader =
    detail::supports<ScopedLockType, scoped_lock_downgrade_to_reader>;

template <typename ScopedLockType,
          bool = scoped_lock_has_upgrade_to_writer<ScopedLockType>::value &&
                  scoped_lock_has_downgrade_to_reader<ScopedLockType>::value>
class scoped_lock_traits {
public:
    using scope_lock_type = ScopedLockType;

    static bool
    initial_rw_state(bool write)
    {
        /* For upgradeable locks, the initial state is always a read lock. */
        return false;
    }

    static bool
    upgrade_to_writer(scope_lock_type &lock)
    {
        return lock.upgrade_to_writer();
    }

    static bool
    downgrade_to_reader(scope_lock_type &lock)
    {
        return lock.downgrade_to_reader();
    }
};

template <typename ScopedLockType>
class scoped_lock_traits<ScopedLockType, false> {
public:
    using scope_lock_type = ScopedLockType;

    static bool
    initial_rw_state(bool write)
    {
        /* Locks without upgrade support are taken in the required mode
         * immediately. */
        return write;
    }

    static bool
    upgrade_to_writer(scope_lock_type &lock)
    {
        /* The lock does not support upgrade; no effect. */
        return true;
    }

    static bool
    downgrade_to_reader(scope_lock_type &lock)
    {
        /* The lock does not support downgrade; no effect. */
        return false;
    }
};
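/*
 * Illustrative sketch (not part of the original header): the aliases above
 * use detail::supports<> as a member-detection idiom, so the traits pick the
 * upgrade-aware primary template only when the lock provides both
 * operations. With a hypothetical lock type:
 *
 *   struct upgradeable_lock {
 *       bool upgrade_to_writer();
 *       bool downgrade_to_reader();
 *       // acquire/release omitted
 *   };
 *
 *   static_assert(scoped_lock_has_upgrade_to_writer<upgradeable_lock>::value,
 *                 "detected");
 *   // shared_mutex_scoped_lock has neither member, so it selects the
 *   // (bool = false) specialization and initial_rw_state(true) asks for a
 *   // write lock up front instead of upgrading later.
 */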
} /* namespace concurrent_hash_map_internal */

template <typename Key, typename T, typename Hash = std::hash<Key>,
          typename KeyEqual = std::equal_to<Key>,
          typename MutexType = pmem::obj::shared_mutex,
          typename ScopedLockType = concurrent_hash_map_internal::
                  shared_mutex_scoped_lock<MutexType>>
class concurrent_hash_map;
namespace concurrent_hash_map_internal
{
/* The public API of concurrent_hash_map must not be used inside a
 * transaction. */
inline void
check_outside_tx()
{
    if (pmemobj_tx_stage() != TX_STAGE_NONE)
        throw pmem::transaction_scope_error(
            "Function called inside transaction scope.");
}

template <typename Hash>
using transparent_key_equal = typename Hash::transparent_key_equal;

template <typename Hash>
using has_transparent_key_equal =
    detail::supports<Hash, transparent_key_equal>;

template <typename Hash, typename Pred,
          bool = has_transparent_key_equal<Hash>::value>
struct key_equal_type {
    using type = typename Hash::transparent_key_equal;
};

template <typename Hash, typename Pred>
struct key_equal_type<Hash, Pred, false> {
    using type = Pred;
};

template <typename Mutex, typename ScopedLockType>
void
assert_not_locked(Mutex &mtx)
{
#ifndef NDEBUG
    ScopedLockType scoped_lock;
    assert(scoped_lock.try_acquire(mtx));
    scoped_lock.release();
#else
    (void)mtx;
#endif
}
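/*
 * Illustrative sketch (not part of the original header): assert_not_locked is
 * a debug-only consistency check, e.g.
 *
 *   pmem::obj::shared_mutex m;
 *   assert_not_locked<pmem::obj::shared_mutex,
 *                     shared_mutex_scoped_lock<pmem::obj::shared_mutex>>(m);
 *   // aborts in debug builds if `m` is already held by another owner
 */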
/* A single node of a bucket's singly linked list. */
template <typename Key, typename T, typename MutexType, typename ScopedLockType>
struct hash_map_node {
    /** Mutex type. */
    using mutex_t = MutexType;

    /** Scoped lock type for the mutex. */
    using scoped_t = ScopedLockType;

    using value_type = detail::pair<const Key, T>;

    /** Persistent pointer type for next. */
    using node_ptr_t = detail::persistent_pool_ptr<
        hash_map_node<Key, T, mutex_t, scoped_t>>;

    /** Next node in the chain. */
    node_ptr_t next;

    /** Node mutex. */
    mutex_t mutex;

    /** Item stored in the node. */
    value_type item;

    hash_map_node(const node_ptr_t &_next, const Key &key)
        : next(_next),
          item(std::piecewise_construct, std::forward_as_tuple(key),
               std::forward_as_tuple())
    {
    }

    hash_map_node(const node_ptr_t &_next, const Key &key, const T &t)
        : next(_next), item(key, t)
    {
    }

    hash_map_node(const node_ptr_t &_next, value_type &&i)
        : next(_next), item(std::move(i))
    {
    }

    template <typename... Args>
    hash_map_node(const node_ptr_t &_next, Args &&... args)
        : next(_next), item(std::forward<Args>(args)...)
    {
    }

    hash_map_node(const node_ptr_t &_next, const value_type &i)
        : next(_next), item(i)
    {
    }

    /** Copy constructor is deleted. */
    hash_map_node(const hash_map_node &) = delete;

    /** Assignment operator is deleted. */
    hash_map_node &operator=(const hash_map_node &) = delete;
}; /* struct hash_map_node */
/* Properties of the segments used by the hash map. */
template <typename Bucket>
class segment_traits {
public:
    /** Segment index type. */
    using segment_index_t = size_t;
    using size_type = size_t;
    using bucket_type = Bucket;

protected:
    /** PMDK has a limit on the allocation size. */
    constexpr static size_type max_allocation_size = PMEMOBJ_MAX_ALLOC_SIZE;

    /** First big block that has a fixed size. */
    constexpr static segment_index_t first_big_block = 27;

    /** Max number of buckets per segment block. */
    constexpr static size_type big_block_size = size_type(1)
        << first_big_block;

    static_assert((big_block_size * sizeof(bucket_type)) <
                      max_allocation_size,
                  "Block size exceeds max_allocation_size");

    /** @return index of the first block in segment @arg seg. */
    constexpr static segment_index_t
    first_block_in_segment(segment_index_t seg)
    {
        return seg < first_big_block
            ? seg
            : (first_big_block +
               (segment_index_t(1) << (seg - first_big_block)) - 1);
    }

    /** @return number of blocks in segment @arg seg. */
    constexpr static size_type
    blocks_in_segment(segment_index_t seg)
    {
        return seg < first_big_block
            ? segment_index_t(1)
            : segment_index_t(1) << (seg - first_big_block);
    }

    /** @return number of buckets in block @arg b. */
    constexpr static size_type
    block_size(segment_index_t b)
    {
        return b < first_big_block ? segment_size(b ? b : 1)
                                   : big_block_size;
    }

public:
    /** Number of embedded segments. */
    constexpr static segment_index_t embedded_segments = 1;

    /** Count of buckets in the embedded segments. */
    constexpr static size_type embedded_buckets = 1 << embedded_segments;

    /** Maximum number of segments. */
    constexpr static segment_index_t number_of_segments = 32;

    /** Count of segments in the first block. */
    static const size_type first_block = 8;

    /** @return maximum number of blocks. */
    constexpr static segment_index_t
    number_of_blocks()
    {
        return first_block_in_segment(number_of_segments);
    }

    /** @return segment index of the given bucket index. */
    static segment_index_t
    segment_index_of(size_type index)
    {
        return segment_index_t(detail::Log2(index | 1));
    }

    /** @return the first bucket index of segment @arg k. */
    constexpr static segment_index_t
    segment_base(segment_index_t k)
    {
        return (segment_index_t(1) << k) & ~segment_index_t(1);
    }

    /** @return segment size, except for @arg k == 0. */
    constexpr static size_type
    segment_size(segment_index_t k)
    {
        return size_type(1) << k;
    }

    static_assert(
        embedded_segments < first_big_block,
        "Number of embedded segments cannot exceed max_allocation_size");
}; /* class segment_traits */
/* Facade over the block table that presents one segment as an array of
 * buckets. */
template <typename BlockTable, typename SegmentTraits, bool is_const>
class segment_facade_impl : public SegmentTraits {
private:
    using traits_type = SegmentTraits;
    using traits_type::block_size;
    using traits_type::blocks_in_segment;
    using traits_type::embedded_buckets;
    using traits_type::embedded_segments;
    using traits_type::first_block;
    using traits_type::first_block_in_segment;
    using traits_type::segment_base;
    using traits_type::segment_size;

public:
    using table_reference =
        typename std::conditional<is_const, const BlockTable &,
                                  BlockTable &>::type;
    using table_pointer =
        typename std::conditional<is_const, const BlockTable *,
                                  BlockTable *>::type;

    using bucket_type = typename traits_type::bucket_type;
    using segment_index_t = typename traits_type::segment_index_t;
    using size_type = typename traits_type::size_type;

    /** Constructor. */
    segment_facade_impl(table_reference table, segment_index_t s)
        : my_table(&table), my_seg(s)
    {
        assert(my_seg < traits_type::number_of_segments);
    }

    /** Copy constructor. */
    segment_facade_impl(const segment_facade_impl &src)
        : my_table(src.my_table), my_seg(src.my_seg)
    {
    }

    segment_facade_impl(segment_facade_impl &&src) = default;

    /** Copy assignment. */
    segment_facade_impl &
    operator=(const segment_facade_impl &src)
    {
        my_table = src.my_table;
        my_seg = src.my_seg;
        return *this;
    }

    /** Move assignment. */
    segment_facade_impl &
    operator=(segment_facade_impl &&src)
    {
        my_table = src.my_table;
        my_seg = src.my_seg;
        return *this;
    }

    /** Access a bucket within this segment. */
    bucket_type &operator[](size_type i) const
    {
        assert(i < size());

        segment_index_t table_block = first_block_in_segment(my_seg);
        size_type b_size = block_size(table_block);

        table_block += i / b_size;
        i = i % b_size;

        return (*my_table)[table_block][static_cast<std::ptrdiff_t>(i)];
    }

    /* Segment navigation helpers. */
    segment_facade_impl &
    operator++()
    {
        ++my_seg;
        return *this;
    }

    segment_facade_impl
    operator++(int)
    {
        segment_facade_impl tmp = *this;
        ++(*this);
        return tmp;
    }

    segment_facade_impl &
    operator--()
    {
        --my_seg;
        return *this;
    }

    segment_facade_impl
    operator--(int)
    {
        segment_facade_impl tmp = *this;
        --(*this);
        return tmp;
    }

    segment_facade_impl &
    operator+=(segment_index_t off)
    {
        my_seg += off;
        return *this;
    }

    segment_facade_impl &
    operator-=(segment_index_t off)
    {
        my_seg -= off;
        return *this;
    }

    segment_facade_impl
    operator+(segment_index_t off) const
    {
        return segment_facade_impl(*(this->my_table), my_seg + off);
    }

    segment_facade_impl
    operator-(segment_index_t off) const
    {
        return segment_facade_impl(*(this->my_table), my_seg - off);
    }

    /** Allocate memory for this segment. */
    void
    enable(pool_base &pop)
    {
        assert(my_seg >= embedded_segments);

        if (my_seg < first_block) {
            enable_first_block(pop);
        } else {
            enable_big_segment(pop);
        }
    }

    /** Deallocate this segment. */
    void
    disable()
    {
        assert(my_seg >= embedded_segments);

        if (my_seg < first_block) {
            if (my_seg == embedded_segments) {
                size_type sz = segment_size(first_block) -
                    embedded_buckets;
                delete_persistent<bucket_type[]>(
                    (*my_table)[my_seg], sz);
            }
            (*my_table)[my_seg] = nullptr;
        } else {
            block_range blocks = segment_blocks(my_seg);

            for (segment_index_t b = blocks.first;
                 b < blocks.second; ++b) {
                if ((*my_table)[b] != nullptr) {
                    delete_persistent<bucket_type[]>(
                        (*my_table)[b], block_size(b));
                    (*my_table)[b] = nullptr;
                }
            }
        }
    }

    /** @return size of this segment. */
    constexpr size_type
    size() const
    {
        return segment_size(my_seg ? my_seg : 1);
    }

    /** @return true if all blocks of this segment are allocated. */
    bool
    is_valid() const
    {
        block_range blocks = segment_blocks(my_seg);

        for (segment_index_t b = blocks.first; b < blocks.second; ++b) {
            if ((*my_table)[b] == nullptr)
                return false;
        }

        return true;
    }

private:
    using block_range = std::pair<segment_index_t, segment_index_t>;

    /** @return [begin, end) block indexes of the given segment. */
    static block_range
    segment_blocks(segment_index_t seg)
    {
        segment_index_t begin = first_block_in_segment(seg);

        return block_range(begin, begin + blocks_in_segment(seg));
    }

    /** Allocate the shared block that backs segments below first_block. */
    void
    enable_first_block(pool_base &pop)
    {
        assert(my_seg == embedded_segments);
        {
            transaction::manual tx(pop);

            size_type sz =
                segment_size(first_block) - embedded_buckets;
            (*my_table)[my_seg] =
                make_persistent<bucket_type[]>(sz);

            persistent_ptr<bucket_type> base =
                (*my_table)[embedded_segments].raw();

            for (segment_index_t s = my_seg + 1; s < first_block;
                 ++s) {
                std::ptrdiff_t off =
                    static_cast<std::ptrdiff_t>(
                        segment_base(s) -
                        segment_base(my_seg));

                (*my_table)[s] = (base + off).raw();
            }

            transaction::commit();
        }
    }

    /** Allocate a block per segment for big segments. */
    void
    enable_big_segment(pool_base &pop)
    {
        block_range blocks = segment_blocks(my_seg);
        {
            transaction::manual tx(pop);

            for (segment_index_t b = blocks.first;
                 b < blocks.second; ++b) {
                assert((*my_table)[b] == nullptr);
                (*my_table)[b] = make_persistent<bucket_type[]>(
                    block_size(b));
            }

            transaction::commit();
        }
    }

    /** Pointer to the table of blocks. */
    table_pointer my_table;

    /** Segment index. */
    segment_index_t my_seg;
}; /* class segment_facade_impl */
/* Base class of concurrent_hash_map: the persistent layout plus the
 * bucket/segment machinery shared by all instantiations. */
template <typename Key, typename T, typename MutexType, typename ScopedLockType>
class hash_map_base {
public:
    using mutex_t = MutexType;
    using scoped_t = ScopedLockType;

    /** Size type. */
    using size_type = size_t;

    /** Type of a hash code. */
    using hashcode_type = size_t;

    /** Node base type. */
    using node = hash_map_node<Key, T, mutex_t, scoped_t>;

    /** Node base pointer. */
    using node_ptr_t = detail::persistent_pool_ptr<node>;

    /** Bucket type. */
    struct bucket {
        using mutex_t = MutexType;
        using scoped_t = ScopedLockType;

        /** Bucket mutex. */
        mutex_t mutex;

        /** Atomic flag indicating whether the bucket was rehashed. */
        p<std::atomic<uint64_t>> rehashed;

        /** List of the nodes stored in the bucket. */
        node_ptr_t node_list;

        /** Default constructor. */
        bucket() : node_list(nullptr)
        {
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
            VALGRIND_HG_DISABLE_CHECKING(&rehashed,
                                         sizeof(rehashed));
#endif
            rehashed.get_rw() = false;
        }

        /** @return true if the bucket is rehashed and ready to use. */
        bool
        is_rehashed(std::memory_order order)
        {
            return rehashed.get_ro().load(order);
        }

        /** Set the rehashed flag. */
        void
        set_rehashed(std::memory_order order)
        {
            rehashed.get_rw().store(true, order);
        }

        bucket(const bucket &) = delete;
        bucket &operator=(const bucket &) = delete;
    }; /* struct bucket */

    /** Segment traits. */
    using segment_traits_t = segment_traits<bucket>;

    /** Segment index type. */
    using segment_index_t = typename segment_traits_t::segment_index_t;

    /** Count of buckets in the embedded segments. */
    static const size_type embedded_buckets =
        segment_traits_t::embedded_buckets;

    /** Count of segments in the first block. */
    static const size_type first_block = segment_traits_t::first_block;

    /** Size of the block table. */
    constexpr static size_type block_table_size =
        segment_traits_t::number_of_blocks();

    /** Segment pointer. */
    using segment_ptr_t = persistent_ptr<bucket[]>;

    /** Bucket pointer. */
    using bucket_ptr_t = persistent_ptr<bucket>;

    /** Block pointers table type. */
    using blocks_table_t = segment_ptr_t[block_table_size];

    /** Mutex taken while enabling a new segment. */
    using segment_enable_mutex_t = pmem::obj::mutex;

    /** Data specific to every thread using this hash map. */
    struct tls_data_t {
        p<int64_t> size_diff = 0;
        std::aligned_storage<56, 8> padding;
    };

    using tls_t = detail::enumerable_thread_specific<tls_data_t>;

    enum feature_flags : uint32_t { FEATURE_CONSISTENT_SIZE = 1 };

    /** Compat and incompat features of a layout. */
    struct features {
        p<uint32_t> compat;
        p<uint32_t> incompat;
    };

    /* --- persistent layout --- */

    /** ID of the persistent memory pool where the hash map resides. */
    p<uint64_t> my_pool_uuid;

    /** Features of the hash map layout, used to check compatibility. */
    features layout_features;

    /** Reserved space kept for layout compatibility. */
    std::aligned_storage<sizeof(size_t), sizeof(size_t)>::type
        my_mask_reserved;

    /** Hash mask = sum of allocated segment sizes - 1. */
    std::atomic<hashcode_type> my_mask;

    /** Size of the value (key-value pair) stored in the pool. */
    std::size_t value_size;

    /** Padding to the end of the cache line. */
    std::aligned_storage<24, 8>::type padding1;

    /** Segment pointers table. Also prevents false sharing between my_mask
     * and my_size. */
    blocks_table_t my_table;

    /** Size of the container in stored items; kept in a separate cache line
     * from my_mask. */
    std::atomic<size_type> my_size;

    /** Padding to the end of the cache line. */
    std::aligned_storage<24, 8>::type padding2;

    /** Thread-specific data. */
    persistent_ptr<tls_t> tls_ptr;

    /** Consistent size; set in runtime_initialize() and afterwards updated
     * only transactionally. */
    p<size_t> on_init_size;

    /** Reserved for future use. */
    std::aligned_storage<40, 8>::type reserved;

    /** Mutex used while enabling a new segment. */
    segment_enable_mutex_t my_segment_enable_mutex;

    /** Embedded buckets. */
    bucket my_embedded_segment[embedded_buckets];

    /** @return features supported by this version of the layout. */
    static constexpr features
    header_features()
    {
        return {FEATURE_CONSISTENT_SIZE, 0};
    }

    const std::atomic<hashcode_type> &
    mask() const noexcept
    {
        return my_mask;
    }

    std::atomic<hashcode_type> &
    mask() noexcept
    {
        return my_mask;
    }

    size_t
    size() const
    {
        return my_size.load(std::memory_order_relaxed);
    }

    p<int64_t> &
    thread_size_diff()
    {
        assert(this->tls_ptr != nullptr);
        return this->tls_ptr->local().size_diff;
    }

    /** Merge the per-thread size deltas into on_init_size. */
    void
    tls_restore()
    {
        assert(this->tls_ptr != nullptr);

        pool_base pop = pool_base{pmemobj_pool_by_ptr(this)};

        int64_t last_run_size = 0;
        for (auto &data : *tls_ptr)
            last_run_size += data.size_diff;

        /* Make sure that on_init_size + last_run_size >= 0 */
        assert(last_run_size >= 0 ||
               static_cast<int64_t>(static_cast<size_t>(last_run_size) +
                                    on_init_size) >= 0);

        on_init_size += static_cast<size_t>(last_run_size);

        this->my_size = on_init_size;
    }

    /** Const segment facade type. */
    using const_segment_facade_t =
        segment_facade_impl<blocks_table_t, segment_traits_t, true>;

    /** Non-const segment facade type. */
    using segment_facade_t =
        segment_facade_impl<blocks_table_t, segment_traits_t, false>;

    /** Default constructor. */
    hash_map_base()
    {
        static_assert(
            sizeof(size_type) == sizeof(std::atomic<size_type>),
            "std::atomic should have the same layout as underlying integral type");

#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
        VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
#endif
        layout_features = {0, 0};

        PMEMoid oid = pmemobj_oid(this);

        assert(!OID_IS_NULL(oid));

        my_pool_uuid = oid.pool_uuid_lo;

        pool_base pop = get_pool_base();

        /* Enable the embedded segments. */
        for (size_type i = 0; i < segment_traits_t::embedded_segments;
             ++i) {
            my_table[i] = pmemobj_oid(
                my_embedded_segment +
                segment_traits_t::segment_base(i));
            segment_facade_t seg(my_table, i);
            mark_rehashed<false>(pop, seg);
        }

        this->tls_ptr = nullptr;
    }

    /** Free the thread-local data, if it was allocated. */
    void
    free_tls()
    {
        auto pop = get_pool_base();

        if ((layout_features.compat & FEATURE_CONSISTENT_SIZE) &&
            tls_ptr) {
            transaction::run(pop, [&] {
                delete_persistent<tls_t>(tls_ptr);
                tls_ptr = nullptr;
            });
        }
    }

    /** Re-calculate the mask value on each process restart. */
    void
    calculate_mask()
    {
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
        VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
        VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
#endif
#if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED
        VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_size, sizeof(my_size));
        VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_mask, sizeof(my_mask));
#endif
        hashcode_type m = embedded_buckets - 1;

        const_segment_facade_t segment(
            my_table, segment_traits_t::embedded_segments);

        while (segment.is_valid()) {
            m += segment.size();
            ++segment;
        }

        mask().store(m, std::memory_order_relaxed);
    }

    /** Mark all buckets of a segment as rehashed. */
    template <bool Flush = true>
    void
    mark_rehashed(pool_base &pop, segment_facade_t &segment)
    {
        for (size_type i = 0; i < segment.size(); ++i) {
            bucket *b = &(segment[i]);

            assert_not_locked<mutex_t, scoped_t>(b->mutex);

            b->set_rehashed(std::memory_order_relaxed);
        }

        if (Flush) {
            /* Flush in a separate loop to avoid read-after-flush. */
            for (size_type i = 0; i < segment.size(); ++i) {
                bucket *b = &(segment[i]);
                pop.flush(b->rehashed);
            }

            pop.drain();
        }
    }

    /** Enable a new segment in the hash map. */
    void
    enable_segment(segment_index_t k, bool is_initial = false)
    {
        assert(k);

        pool_base pop = get_pool_base();
        size_type sz;

        if (k >= first_block) {
            segment_facade_t new_segment(my_table, k);

            sz = new_segment.size();
            if (!new_segment.is_valid())
                new_segment.enable(pop);

            if (is_initial)
                mark_rehashed(pop, new_segment);

            /* double it to get the entire capacity of the container */
            sz <<= 1;
        } else {
            /* the first block */
            assert(k == segment_traits_t::embedded_segments);

            for (segment_index_t i = k; i < first_block; ++i) {
                segment_facade_t new_segment(my_table, i);

                if (!new_segment.is_valid())
                    new_segment.enable(pop);

                if (is_initial)
                    mark_rehashed(pop, new_segment);
            }

            sz = segment_traits_t::segment_size(first_block);
        }
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
        ANNOTATE_HAPPENS_BEFORE(&my_mask);
#endif
        mask().store(sz - 1, std::memory_order_release);
    }

    /** @return the bucket for the given (masked) hash code. */
    bucket *
    get_bucket(hashcode_type h) const
    {
        segment_index_t s = segment_traits_t::segment_index_of(h);

        h -= segment_traits_t::segment_base(s);

        const_segment_facade_t segment(my_table, s);

        assert(segment.is_valid());

        return &(segment[h]);
    }

    /** Reload the mask and detect concurrent growth (mask race). */
    bool
    check_mask_race(hashcode_type h, hashcode_type &m) const
    {
        hashcode_type m_now, m_old = m;

        m_now = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
        ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

        if (m_old != m_now)
            return check_rehashing_collision(h, m_old, m = m_now);

        return false;
    }

    /** Process a mask race: check for a rehashing collision. */
    bool
    check_rehashing_collision(hashcode_type h, hashcode_type m_old,
                              hashcode_type m) const
    {
        assert(m_old != m);

        if ((h & m_old) != (h & m)) {
            /* mask changed for this hashcode; find the next applicable
             * mask after m_old */
            for (++m_old; !(h & m_old); m_old <<= 1)
                ;

            m_old = (m_old << 1) - 1; /* get full mask from a bit */

            assert((m_old & (m_old + 1)) == 0 && m_old <= m);

            /* check whether the bucket is rehashing or rehashed */
            bucket *b = get_bucket(h & m_old);
            return b->is_rehashed(std::memory_order_acquire);
        }

        return false;
    }
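    /*
     * Worked example (illustrative, not part of the original header) of the
     * mask-race check above: suppose a thread hashed h = 0b0101 with the old
     * mask m_old = 0b0011, but the table grew and m is now 0b0111. Then
     * h & m_old == 0b0001 while h & m == 0b0101, so the item may live in a
     * bucket that is still being rehashed. The loop advances m_old to the
     * first set bit of h above the old mask (0b0100), widens it to a full
     * mask (0b0111), and then checks whether bucket h & 0b0111 == 5 has
     * already been rehashed before the caller retries.
     */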
    /** Insert a node into the bucket's chain; must run inside a transaction. */
    template <typename Node, typename... Args>
    void
    insert_new_node_internal(bucket *b,
                             detail::persistent_pool_ptr<Node> &new_node,
                             Args &&... args)
    {
        assert(pmemobj_tx_stage() == TX_STAGE_WORK);

        new_node = pmem::obj::make_persistent<Node>(
            b->node_list, std::forward<Args>(args)...);
        b->node_list = new_node; /* bucket is locked */
    }

    /** Insert a node and update the element counters.
     *  @return new size of the container. */
    template <typename Node, typename... Args>
    size_type
    insert_new_node(bucket *b, detail::persistent_pool_ptr<Node> &new_node,
                    Args &&... args)
    {
        pool_base pop = get_pool_base();

        /* Inside an outer transaction update on_init_size directly;
         * otherwise account the change in the per-thread size delta and
         * publish the node in its own transaction. */
        if (pmemobj_tx_stage() == TX_STAGE_WORK) {
            insert_new_node_internal(b, new_node,
                                     std::forward<Args>(args)...);
            this->on_init_size++;
        } else {
            auto &size_diff = thread_size_diff();

            pmem::obj::transaction::run(pop, [&] {
                insert_new_node_internal(
                    b, new_node,
                    std::forward<Args>(args)...);
                size_diff++;
            });
        }

        /* Increment the volatile size. */
        return ++(this->my_size);
    }

    /** Check the load factor and grow the table if needed.
     *  @return true if a new segment was allocated. */
    bool
    check_growth(hashcode_type m, size_type sz)
    {
        if (sz >= m) {
            segment_index_t new_seg =
                static_cast<segment_index_t>(detail::Log2(
                    m + 1)); /* optimized segment_index_of */

            assert(segment_facade_t(my_table, new_seg - 1)
                       .is_valid());

            std::unique_lock<segment_enable_mutex_t> lock(
                my_segment_enable_mutex, std::try_to_lock);

            if (lock) {
                if (mask().load(std::memory_order_relaxed) ==
                    m) {
                    /* Nobody else has grown it yet. */
                    enable_segment(new_seg);

                    return true;
                }
            }
        }

        return false;
    }

    /** Prepare enough segments for the given number of buckets. */
    void
    reserve(size_type buckets)
    {
        if (buckets == 0)
            return;

        --buckets;

        bool is_initial = this->size() == 0;

        for (size_type m = mask(); buckets > m; m = mask())
            enable_segment(
                segment_traits_t::segment_index_of(m + 1),
                is_initial);
    }

    /** Swap two hash_map_base objects.
     *  @throws pmem::transaction_error on PMDK transaction failure. */
    void
    internal_swap(hash_map_base<Key, T, mutex_t, scoped_t> &table)
    {
        pool_base p = get_pool_base();
        {
            transaction::manual tx(p);

            this->my_pool_uuid.swap(table.my_pool_uuid);

            /* As internal_swap can only be called from one thread, and
             * there can be an outer transaction, mask and size changes
             * must be transactional. */
            this->mask() = table.mask().exchange(
                this->mask(), std::memory_order_relaxed);

            this->my_size = table.my_size.exchange(
                this->my_size, std::memory_order_relaxed);

            /* Swap the consistent-size bookkeeping. */
            std::swap(this->tls_ptr, table.tls_ptr);

            for (size_type i = 0; i < embedded_buckets; ++i)
                this->my_embedded_segment[i].node_list.swap(
                    table.my_embedded_segment[i].node_list);

            for (size_type i = segment_traits_t::embedded_segments;
                 i < block_table_size; ++i)
                this->my_table[i].swap(table.my_table[i]);

            transaction::commit();
        }
    }

    /** Get the persistent memory pool where the hash map resides. */
    pool_base
    get_pool_base()
    {
        PMEMobjpool *pop =
            pmemobj_pool_by_oid(PMEMoid{my_pool_uuid, 0});

        return pool_base(pop);
    }
}; /* class hash_map_base */
/* Forward iterator over the hash map. */
template <typename Container, bool is_const>
class hash_map_iterator {
public:
    using iterator_category = std::forward_iterator_tag;
    using difference_type = ptrdiff_t;
    using map_type = Container;
    using value_type = typename map_type::value_type;
    using node = typename map_type::node;
    using bucket = typename map_type::bucket;
    using map_ptr = typename std::conditional<is_const, const map_type *,
                                              map_type *>::type;
    using reference =
        typename std::conditional<is_const,
                                  typename map_type::const_reference,
                                  typename map_type::reference>::type;
    using pointer =
        typename std::conditional<is_const,
                                  typename map_type::const_pointer,
                                  typename map_type::pointer>::type;

    template <typename C, bool M, bool U>
    friend bool operator==(const hash_map_iterator<C, M> &i,
                           const hash_map_iterator<C, U> &j);

    template <typename C, bool M, bool U>
    friend bool operator!=(const hash_map_iterator<C, M> &i,
                           const hash_map_iterator<C, U> &j);

    friend class hash_map_iterator<map_type, true>;

#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
private:
    template <typename Key, typename T, typename Hash, typename KeyEqual,
              typename MutexType, typename ScopedLockType>
    friend class ::pmem::obj::concurrent_hash_map;
#else
public: /* workaround for MSVC friend-template limitation */
#endif
    hash_map_iterator(map_ptr map, size_t index)
        : my_map(map), my_index(index), my_bucket(nullptr), my_node(nullptr)
    {
        if (my_index <= my_map->mask()) {
            bucket_accessor acc(my_map, my_index);
            my_bucket = acc.get();
            my_node = static_cast<node *>(
                my_bucket->node_list.get(my_map->my_pool_uuid));

            if (!my_node)
                advance_to_next_bucket();
        }
    }

public:
    /** Construct an undefined iterator. */
    hash_map_iterator() = default;

    /** Copy constructor. */
    hash_map_iterator(const hash_map_iterator &other)
        : my_map(other.my_map),
          my_index(other.my_index),
          my_bucket(other.my_bucket),
          my_node(other.my_node)
    {
    }

    /** Copy constructor for const iterator from non-const iterator. */
    template <typename U = void,
              typename = typename std::enable_if<is_const, U>::type>
    hash_map_iterator(const hash_map_iterator<map_type, false> &other)
        : my_map(other.my_map),
          my_index(other.my_index),
          my_bucket(other.my_bucket),
          my_node(other.my_node)
    {
    }

    hash_map_iterator &operator=(const hash_map_iterator &it) = default;

    /** Indirection (dereference). */
    reference operator*() const
    {
        assert(my_node);
        return my_node->item;
    }

    /** Member access. */
    pointer operator->() const
    {
        return &operator*();
    }

    /** Prefix increment. */
    hash_map_iterator &
    operator++()
    {
        my_node = static_cast<node *>(
            my_node->next.get((my_map->my_pool_uuid)));

        if (!my_node)
            advance_to_next_bucket();

        return *this;
    }

    /** Postfix increment. */
    hash_map_iterator
    operator++(int)
    {
        hash_map_iterator old(*this);
        operator++();
        return old;
    }

private:
    /** Map over which we are iterating. */
    map_ptr my_map = nullptr;

    /** Bucket index for the current item. */
    size_t my_index = 0;

    /** Pointer to the current bucket. */
    bucket *my_bucket = nullptr;

    /** Pointer to the node holding the current item. */
    node *my_node = nullptr;

    class bucket_accessor {
    public:
        bucket_accessor(map_ptr m, size_t index)
        {
            my_bucket = m->get_bucket(index);
        }

        bucket *
        get() const
        {
            return my_bucket;
        }

    private:
        bucket *my_bucket;
    };

    void
    advance_to_next_bucket()
    {
        size_t k = my_index + 1;

        assert(my_bucket);

        while (k <= my_map->mask()) {
            bucket_accessor acc(my_map, k);
            my_bucket = acc.get();

            if (my_bucket->node_list) {
                my_node = static_cast<node *>(
                    my_bucket->node_list.get(
                        my_map->my_pool_uuid));

                my_index = k;

                return;
            }

            ++k;
        }

        my_bucket = nullptr;
        my_node = nullptr;
        my_index = k;
    }
}; /* class hash_map_iterator */

template <typename Container, bool M, bool U>
bool
operator==(const hash_map_iterator<Container, M> &i,
           const hash_map_iterator<Container, U> &j)
{
    return i.my_node == j.my_node && i.my_map == j.my_map;
}

template <typename Container, bool M, bool U>
bool
operator!=(const hash_map_iterator<Container, M> &i,
           const hash_map_iterator<Container, U> &j)
{
    return i.my_node != j.my_node || i.my_map != j.my_map;
}
} /* namespace concurrent_hash_map_internal */
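/*
 * Illustrative sketch (not part of the original header): hash_map_iterator is
 * a forward iterator that walks buckets in index order and follows each
 * bucket's node chain. Iteration is not concurrency-safe; it is intended for
 * quiescent use such as the size recovery in runtime_initialize():
 *
 *   // `map` is a concurrent_hash_map<...> with no concurrent writers
 *   for (auto it = map.begin(); it != map.end(); ++it)
 *       handle(it->first, it->second); // `handle` is a hypothetical callback
 */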
/* Persistent-memory-aware concurrent hash map. */
template <typename Key, typename T, typename Hash, typename KeyEqual,
          typename MutexType, typename ScopedLockType>
class concurrent_hash_map
    : protected concurrent_hash_map_internal::hash_map_base<Key, T, MutexType,
                                                             ScopedLockType> {
    template <typename Container, bool is_const>
    friend class concurrent_hash_map_internal::hash_map_iterator;

public:
    using size_type = typename concurrent_hash_map_internal::hash_map_base<
        Key, T, MutexType, ScopedLockType>::size_type;
    using hashcode_type =
        typename concurrent_hash_map_internal::hash_map_base<
            Key, T, MutexType, ScopedLockType>::hashcode_type;
    using key_type = Key;
    using mapped_type = T;
    using value_type = typename concurrent_hash_map_internal::hash_map_base<
        Key, T, MutexType, ScopedLockType>::node::value_type;
    using difference_type = ptrdiff_t;
    using pointer = value_type *;
    using const_pointer = const value_type *;
    using reference = value_type &;
    using const_reference = const value_type &;
    using iterator = concurrent_hash_map_internal::hash_map_iterator<
        concurrent_hash_map, false>;
    using const_iterator = concurrent_hash_map_internal::hash_map_iterator<
        concurrent_hash_map, true>;
    using hasher = Hash;
    using key_equal = typename concurrent_hash_map_internal::key_equal_type<
        Hash, KeyEqual>::type;

protected:
    using mutex_t = MutexType;
    using scoped_t = ScopedLockType;

    /* Explicitly use methods and types from the template base class. */
    using hash_map_base =
        concurrent_hash_map_internal::hash_map_base<Key, T, mutex_t,
                                                    scoped_t>;
    using hash_map_base::calculate_mask;
    using hash_map_base::check_growth;
    using hash_map_base::check_mask_race;
    using hash_map_base::embedded_buckets;
    using hash_map_base::FEATURE_CONSISTENT_SIZE;
    using hash_map_base::get_bucket;
    using hash_map_base::get_pool_base;
    using hash_map_base::header_features;
    using hash_map_base::insert_new_node;
    using hash_map_base::internal_swap;
    using hash_map_base::layout_features;
    using hash_map_base::mask;
    using hash_map_base::reserve;
    using tls_t = typename hash_map_base::tls_t;
    using node = typename hash_map_base::node;
    using node_mutex_t = typename node::mutex_t;
    using node_ptr_t = typename hash_map_base::node_ptr_t;
    using bucket = typename hash_map_base::bucket;
    using bucket_lock_type = typename bucket::scoped_t;
    using segment_index_t = typename hash_map_base::segment_index_t;
    using segment_traits_t = typename hash_map_base::segment_traits_t;
    using segment_facade_t = typename hash_map_base::segment_facade_t;
    using scoped_lock_traits_type =
        concurrent_hash_map_internal::scoped_lock_traits<scoped_t>;

    using persistent_node_ptr_t = detail::persistent_pool_ptr<node>;
    void
    delete_node(const node_ptr_t &n)
    {
        delete_persistent<node>(
            detail::static_persistent_pool_pointer_cast<node>(n)
                .get_persistent_ptr(this->my_pool_uuid));
    }

    template <typename K>
    persistent_node_ptr_t
    search_bucket(const K &key, bucket *b) const
    {
        assert(b->is_rehashed(std::memory_order_relaxed));

        persistent_node_ptr_t n =
            detail::static_persistent_pool_pointer_cast<node>(
                b->node_list);

        while (n &&
               !key_equal{}(key,
                            n.get(this->my_pool_uuid)->item.first)) {
            n = detail::static_persistent_pool_pointer_cast<node>(
                n.get(this->my_pool_uuid)->next);
        }

        return n;
    }

    /** Finds, rehashes and locks a bucket for concurrent operations. */
    class bucket_accessor : public bucket_lock_type {
        bucket *my_b;

    public:
        bucket_accessor(bucket_accessor &&b) noexcept : my_b(b.my_b)
        {
            bucket_lock_type::mutex = b.bucket_lock_type::mutex;
            bucket_lock_type::is_writer =
                b.bucket_lock_type::is_writer;
            b.bucket_lock_type::mutex = nullptr;
            b.bucket_lock_type::is_writer = false;
        }

        bucket_accessor(concurrent_hash_map *base,
                        const hashcode_type h, bool writer = false)
        {
            acquire(base, h, writer);
        }

        /** Find the bucket by masked hashcode, optionally rehash it,
         *  and acquire the lock. */
        void
        acquire(concurrent_hash_map *base, const hashcode_type h,
                bool writer = false)
        {
            my_b = base->get_bucket(h);

            if (my_b->is_rehashed(std::memory_order_acquire) ==
                    false &&
                bucket_lock_type::try_acquire(this->my_b->mutex,
                                              /*write=*/true)) {
                if (my_b->is_rehashed(
                        std::memory_order_relaxed) ==
                    false) {
                    /* recursive rehashing */
                    base->rehash_bucket<false>(my_b, h);
                }
            } else {
                bucket_lock_type::acquire(my_b->mutex, writer);
            }

            assert(my_b->is_rehashed(std::memory_order_relaxed));
        }

        /** @return true if the bucket is locked for write. */
        bool
        is_writer() const
        {
            return bucket_lock_type::is_writer;
        }

        bucket *
        get() const
        {
            return my_b;
        }

        bucket *
        operator->() const
        {
            return this->get();
        }
    };

    /** Serial bucket accessor used in single-threaded operations. */
    class serial_bucket_accessor {
        bucket *my_b;

    public:
        serial_bucket_accessor(concurrent_hash_map *base,
                               const hashcode_type h,
                               bool writer = false)
        {
            acquire(base, h, writer);
        }

        void
        acquire(concurrent_hash_map *base, const hashcode_type h,
                bool writer = false)
        {
            my_b = base->get_bucket(h);

            if (my_b->is_rehashed(std::memory_order_relaxed) ==
                false) {
                /* recursive rehashing */
                base->rehash_bucket<true>(my_b, h);
            }

            assert(my_b->is_rehashed(std::memory_order_relaxed));
        }

        /* is_writer(), get() and operator->() mirror bucket_accessor
         * (bodies elided in this extract). */
    };

    hashcode_type
    get_hash_code(node_ptr_t &n)
    {
        return hasher{}(
            detail::static_persistent_pool_pointer_cast<node>(n)(
                this->my_pool_uuid)
                ->item.first);
    }

    template <bool serial>
    void
    rehash_bucket(bucket *b_new, const hashcode_type h)
    {
        using accessor_type = typename std::conditional<
            serial, serial_bucket_accessor, bucket_accessor>::type;
        using scoped_lock_traits_type =
            concurrent_hash_map_internal::scoped_lock_traits<
                accessor_type>;

        pool_base pop = get_pool_base();
        node_ptr_t *p_new = &(b_new->node_list);

        /* This condition is only true when there was a failure just
         * before setting the rehashed flag. */
        if (*p_new != nullptr) {
            assert(!b_new->is_rehashed(std::memory_order_relaxed));

            b_new->set_rehashed(std::memory_order_relaxed);
            pop.persist(b_new->rehashed);

            return;
        }

        /* get the parent mask from the topmost bit */
        hashcode_type mask = (1u << detail::Log2(h)) - 1;
        assert((h & mask) < h);
        accessor_type b_old(
            this, h & mask,
            scoped_lock_traits_type::initial_rw_state(true));

        /* get the full mask for the new bucket */
        mask = (mask << 1) | 1;
        assert((mask & (mask + 1)) == 0 && (h & mask) == h);

    restart:
        for (node_ptr_t *p_old = &(b_old->node_list), n = *p_old; n;
             n = *p_old) {
            hashcode_type c = get_hash_code(n);
#ifndef NDEBUG
            hashcode_type bmask = h & (mask >> 1);

            bmask = bmask == 0
                ? 1 /* minimal mask of the parent bucket */
                : (1u << (detail::Log2(bmask) + 1)) - 1;

            assert((c & bmask) == (h & bmask));
#endif

            if ((c & mask) == h) {
                if (!b_old.is_writer() &&
                    !scoped_lock_traits_type::
                        upgrade_to_writer(b_old)) {
                    /* the node pointer can be invalid due to a
                     * concurrent erase; rescan the chain */
                    goto restart;
                }

                /* Add to b_new and exclude from b_old. */
                *p_new = n;
                *p_old = n(this->my_pool_uuid)->next;

                p_new = &(n(this->my_pool_uuid)->next);
            } else {
                /* iterate to the next item */
                p_old = &(n(this->my_pool_uuid)->next);
            }
        }

        *p_new = nullptr;

        /* mark as rehashed */
        b_new->set_rehashed(std::memory_order_release);
        pop.persist(b_new->rehashed);
    }

    void
    check_incompat_features()
    {
        if (layout_features.incompat != header_features().incompat)
            throw pmem::layout_error(
                "Incompat flags mismatch, for more details go to: https://pmem.io/pmdk/cpp_obj/ \n");

        if ((layout_features.compat & FEATURE_CONSISTENT_SIZE) &&
            this->value_size != sizeof(value_type))
            throw pmem::layout_error(
                "Size of value_type is different than the one stored in the pool \n");
    }
public:
    /** Combines data access, locking, and garbage collection. */
    class const_accessor : protected node::scoped_t {
        friend class concurrent_hash_map;
        using node::scoped_t::try_acquire;

    public:
        /** Type of the value accessed through this accessor. */
        using value_type =
            const typename concurrent_hash_map::value_type;

        /** Release the accessor (and the item lock it holds). */
        void
        release()
        {
            concurrent_hash_map_internal::check_outside_tx();

            if (my_node) {
                node::scoped_t::release();
                my_node = nullptr;
            }
        }

        /** @return const reference to the associated value. */
        const_reference operator*() const
        {
            assert(my_node);
            return my_node->item;
        }

        /** Create an empty accessor. */
        const_accessor()
        {
            concurrent_hash_map_internal::check_outside_tx();
        }

        /* empty(), operator->() and the destructor are elided in this
         * extract. */

    protected:
        node_ptr_t my_node;

        hashcode_type my_hash;
    };

    /** Accessor that additionally allows write access to the element. */
    class accessor : public const_accessor {
    public:
        /** Type of the value accessed through this accessor. */
        using value_type = typename concurrent_hash_map::value_type;

        /** @return reference to the associated value. */
        reference operator*() const
        {
            assert(this->my_node);
            return this->my_node->item;
        }

        /** @return pointer to the associated value. */
        pointer operator->() const
        {
            return &operator*();
        }
    };

    /** Copy constructor. */
    concurrent_hash_map(const concurrent_hash_map &table) : hash_map_base()
    {
        concurrent_hash_map_internal::check_outside_tx();
        reserve(table.size());
        internal_copy(table);
    }

    /** Construct a table copying the [first, last) range.
     *  (Other constructors are elided in this extract.) */
    template <typename I>
    concurrent_hash_map(I first, I last)
    {
        concurrent_hash_map_internal::check_outside_tx();
        reserve(static_cast<size_type>(std::distance(first, last)));
        internal_copy(first, last);
    }

    /**
     * Initialize the persistent concurrent hash map after each process
     * restart. MUST be called before any other operation. Not thread safe.
     */
    void
    runtime_initialize()
    {
        check_incompat_features();

        calculate_mask();

        /* Handle the case where the map was created without
         * FEATURE_CONSISTENT_SIZE. */
        if (!(layout_features.compat & FEATURE_CONSISTENT_SIZE)) {
            auto actual_size =
                std::distance(this->begin(), this->end());
            assert(actual_size >= 0);

            this->my_size = static_cast<size_t>(actual_size);

            auto pop = get_pool_base();
            transaction::run(pop, [&] {
                this->tls_ptr = make_persistent<tls_t>();
                this->on_init_size =
                    static_cast<size_t>(actual_size);
                this->value_size = sizeof(value_type);

                layout_features.compat |=
                    FEATURE_CONSISTENT_SIZE;
            });
        } else {
            assert(this->tls_ptr != nullptr);
            this->tls_restore();
        }

        assert(this->size() ==
               size_type(std::distance(this->begin(), this->end())));
    }

    /**
     * Deprecated variant of runtime_initialize().
     */
    [[deprecated(
        "runtime_initialize(bool) is now deprecated, use runtime_initialize(void)")]] void
    runtime_initialize(bool graceful_shutdown)
    {
        check_incompat_features();

        calculate_mask();

        if (!graceful_shutdown) {
            auto actual_size =
                std::distance(this->begin(), this->end());
            assert(actual_size >= 0);
            this->my_size = static_cast<size_type>(actual_size);
        } else {
            assert(this->size() ==
                   size_type(std::distance(this->begin(),
                                           this->end())));
        }
    }

    /** Assignment. Not thread safe. */
    concurrent_hash_map &
    operator=(const concurrent_hash_map &table)
    {
        if (this != &table) {
            clear();
            internal_copy(table);
        }

        return *this;
    }

    /** Rehash and optionally resize the whole table. Not thread safe. */
    void rehash(size_type n = 0);

    /** Clear the table. Not thread safe. */
    void clear();

    /** Swap the contents with another table. Not thread safe. */
    void swap(concurrent_hash_map &table);

    /** @return pair of const iterators bounding all items with the key
     *  (definition elided in this extract). */
    std::pair<const_iterator, const_iterator>
    equal_range(const Key &key) const;

    /** Clear the table and free all allocated resources. Should be called
     *  once before destruction. Not thread safe. */
    void
    free_data()
    {
        auto pop = get_pool_base();

        transaction::run(pop, [&] {
            clear();
            this->free_tls();
        });
    }

    /** Destructor: frees all allocated data. Not thread safe. */
    ~concurrent_hash_map()
    {
        free_data();
    }

    /* Iteration (not thread safe) and size queries. */
    iterator begin() { return iterator(this, 0); }
    iterator end() { return iterator(this, mask() + 1); }
    const_iterator begin() const { return const_iterator(this, 0); }
    const_iterator end() const { return const_iterator(this, mask() + 1); }

    size_type size() const { return hash_map_base::size(); }
    bool empty() const { return this->size() == 0; }
    size_type max_size() const { return (~size_type(0)) / sizeof(node); }
    /** @return count of items with the given key (0 or 1). */
    size_type
    count(const Key &key) const
    {
        concurrent_hash_map_internal::check_outside_tx();
        return const_cast<concurrent_hash_map *>(this)->internal_find(
            key, nullptr, false);
    }

    /** Heterogeneous count(); enabled only if the hasher declares
     *  transparent_key_equal. */
    template <typename K,
              typename = typename std::enable_if<
                  concurrent_hash_map_internal::
                      has_transparent_key_equal<hasher>::value,
                  K>::type>
    size_type
    count(const K &key) const
    {
        concurrent_hash_map_internal::check_outside_tx();
        return const_cast<concurrent_hash_map *>(this)->internal_find(
            key, nullptr, false);
    }

    /** Find an item and acquire a read lock on it. */
    bool
    find(const_accessor &result, const Key &key) const
    {
        concurrent_hash_map_internal::check_outside_tx();
        result.release();
        return const_cast<concurrent_hash_map *>(this)->internal_find(
            key, &result, false);
    }

    /** Heterogeneous find() with a read lock. */
    template <typename K,
              typename = typename std::enable_if<
                  concurrent_hash_map_internal::
                      has_transparent_key_equal<hasher>::value,
                  K>::type>
    bool
    find(const_accessor &result, const K &key) const
    {
        concurrent_hash_map_internal::check_outside_tx();
        result.release();
        return const_cast<concurrent_hash_map *>(this)->internal_find(
            key, &result, false);
    }

    /** Find an item and acquire a write lock on it. */
    bool
    find(accessor &result, const Key &key)
    {
        concurrent_hash_map_internal::check_outside_tx();
        result.release();
        return internal_find(key, &result, true);
    }

    /** Heterogeneous find() with a write lock. */
    template <typename K,
              typename = typename std::enable_if<
                  concurrent_hash_map_internal::
                      has_transparent_key_equal<hasher>::value,
                  K>::type>
    bool
    find(accessor &result, const K &key)
    {
        concurrent_hash_map_internal::check_outside_tx();
        result.release();
        return internal_find(key, &result, true);
    }

    /** Insert the key (if absent) and acquire a read lock on the item. */
    bool
    insert(const_accessor &result, const Key &key)
    {
        concurrent_hash_map_internal::check_outside_tx();
        result.release();
        return internal_insert(key, &result, false, key);
    }

    /** Insert the key (if absent) and acquire a write lock on the item. */
    bool
    insert(accessor &result, const Key &key)
    {
        concurrent_hash_map_internal::check_outside_tx();
        result.release();
        return internal_insert(key, &result, true, key);
    }

    /** Insert the value (if absent), read lock on the item. */
    bool
    insert(const_accessor &result, const value_type &value)
    {
        concurrent_hash_map_internal::check_outside_tx();
        result.release();
        return internal_insert(value.first, &result, false, value);
    }

    /** Insert the value (if absent), write lock on the item. */
    bool
    insert(accessor &result, const value_type &value)
    {
        concurrent_hash_map_internal::check_outside_tx();
        result.release();
        return internal_insert(value.first, &result, true, value);
    }

    /** Insert the value if the key is not already present. */
    bool
    insert(const value_type &value)
    {
        concurrent_hash_map_internal::check_outside_tx();
        return internal_insert(value.first, nullptr, false, value);
    }

    /** Move-insert the value, read lock on the item. */
    bool
    insert(const_accessor &result, value_type &&value)
    {
        concurrent_hash_map_internal::check_outside_tx();
        result.release();
        return internal_insert(value.first, &result, false,
                               std::move(value));
    }

    /** Move-insert the value, write lock on the item. */
    bool
    insert(accessor &result, value_type &&value)
    {
        concurrent_hash_map_internal::check_outside_tx();
        result.release();
        return internal_insert(value.first, &result, true,
                               std::move(value));
    }

    /** Move-insert the value if the key is not already present. */
    bool
    insert(value_type &&value)
    {
        concurrent_hash_map_internal::check_outside_tx();
        return internal_insert(value.first, nullptr, false,
                               std::move(value));
    }

    /** Insert the range [first, last). */
    template <typename I>
    void
    insert(I first, I last)
    {
        concurrent_hash_map_internal::check_outside_tx();

        for (; first != last; ++first)
            insert(*first);
    }

    /** Insert an initializer list. */
    void
    insert(std::initializer_list<value_type> il)
    {
        concurrent_hash_map_internal::check_outside_tx();

        insert(il.begin(), il.end());
    }

    /** Insert, or assign obj to the mapped value if the key exists.
     *  @return true if a new item was inserted. */
    template <typename M>
    bool
    insert_or_assign(const key_type &key, M &&obj)
    {
        concurrent_hash_map_internal::check_outside_tx();

        accessor acc;
        auto result = internal_insert(key, &acc, true, key,
                                      std::forward<M>(obj));

        if (!result) {
            acc->second = std::forward<M>(obj);
        }

        return result;
    }

    /** Insert or assign, moving the key. */
    template <typename M>
    bool
    insert_or_assign(key_type &&key, M &&obj)
    {
        concurrent_hash_map_internal::check_outside_tx();

        accessor acc;
        auto result = internal_insert(key, &acc, true, std::move(key),
                                      std::forward<M>(obj));

        if (!result) {
            acc->second = std::forward<M>(obj);
        }

        return result;
    }

    /** Heterogeneous insert_or_assign(). */
    template <
        typename K, typename M,
        typename = typename std::enable_if<
            concurrent_hash_map_internal::has_transparent_key_equal<
                hasher>::value &&
                std::is_constructible<key_type, K>::value,
            K>::type>
    bool
    insert_or_assign(K &&key, M &&obj)
    {
        concurrent_hash_map_internal::check_outside_tx();

        accessor acc;
        auto result =
            internal_insert(key, &acc, true, std::forward<K>(key),
                            std::forward<M>(obj));

        if (!result) {
            acc->second = std::forward<M>(obj);
        }

        return result;
    }

    /** Remove the element with the given key.
     *  @return true if the element was present. */
    bool
    erase(const Key &key)
    {
        concurrent_hash_map_internal::check_outside_tx();
        return internal_erase(key);
    }

    /** Defragment the given (percentage) range of buckets. */
    pobj_defrag_result
    defragment(double start_percent = 0, double amount_percent = 100)
    {
        double end_percent = start_percent + amount_percent;
        if (start_percent < 0 || start_percent >= 100 ||
            end_percent < 0 || end_percent > 100 ||
            start_percent >= end_percent) {
            throw std::range_error("incorrect range");
        }

        size_t max_index = mask().load(std::memory_order_acquire);
        size_t start_index =
            static_cast<size_t>((start_percent * max_index) / 100);
        size_t end_index =
            static_cast<size_t>((end_percent * max_index) / 100);

        /* Make sure we do not use too large an index, even in case of
         * rounding errors. */
        end_index = (std::min)(end_index, max_index);

#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
        ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

        /* Create a defrag object for elements in the current pool. */
        pmem::obj::defrag my_defrag(this->get_pool_base());
        mutex_vector mv;

        /* Locks are taken in backward order to avoid deadlocks with the
         * rehashing of buckets. */
        for (size_t i = end_index + 1; i >= start_index + 1; i--) {
            /* Locking the bucket and registering its nodes with the
             * defrag object is elided in this extract. */
        }

        /* Defragment the whole pool at once. */
        return my_defrag.run();
    }
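    /*
     * Worked example (illustrative, not part of the original header): with
     * 1024 buckets allocated, mask() == 1023, so defragment(25, 50) computes
     * start_index == (25 * 1023) / 100 == 255 and
     * end_index == (75 * 1023) / 100 == 767, and the loop above visits that
     * bucket range backwards so lock acquisition order cannot deadlock with
     * concurrent rehashing.
     */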
    /** Heterogeneous erase(). */
    template <typename K,
              typename = typename std::enable_if<
                  concurrent_hash_map_internal::
                      has_transparent_key_equal<hasher>::value,
                  K>::type>
    bool
    erase(const K &key)
    {
        concurrent_hash_map_internal::check_outside_tx();
        return internal_erase(key);
    }
protected:
    /** Try to acquire the item mutex for read or write. */
    bool try_acquire_item(const_accessor *result, node_mutex_t &mutex,
                          bool write);

    /** Vector of bucket locks released at destruction time. */
    class mutex_vector {
    public:
        using mutex_t = MutexType;

        /** Lock a bucket, remember the lock, and lock all of its items. */
        bucket *
        push_and_try_lock(concurrent_hash_map *base, hashcode_type h)
        {
            vec.emplace_back(base, h, true /*writer*/);
            bucket *b = vec.back().get();

            auto node_ptr = static_cast<node *>(
                b->node_list.get(base->my_pool_uuid));

            while (node_ptr) {
                const_accessor ca;
                if (!base->try_acquire_item(&ca,
                                            node_ptr->mutex,
                                            /*write=*/true)) {
                    /* Failure handling (dropping the bucket
                     * lock) is elided in this extract. */
                    return nullptr;
                }

                node_ptr = static_cast<node *>(node_ptr->next.get(
                    (base->my_pool_uuid)));
            }

            return b;
        }

    private:
        std::vector<bucket_accessor> vec;
    };

    template <typename K>
    bool internal_find(const K &key, const_accessor *result, bool write);

    template <typename K, typename... Args>
    bool internal_insert(const K &key, const_accessor *result, bool write,
                         Args &&... args);

    /** Obtain a pointer to the node while holding the bucket lock. */
    template <bool Bucket_rw_lock, typename K>
    persistent_node_ptr_t
    get_node(const K &key, bucket_accessor &b)
    {
        /* find a node */
        auto n = search_bucket(key, b.get());

        if (!n) {
            if (Bucket_rw_lock && !b.is_writer() &&
                !scoped_lock_traits_type::upgrade_to_writer(b)) {
                /* Rerun search_bucket in case another thread
                 * inserted the item during the upgrade. */
                n = search_bucket(key, b.get());
                if (n) {
                    /* unfortunately, it did */
                    scoped_lock_traits_type::
                        downgrade_to_reader(b);
                    return n;
                }
            }
        }

        return n;
    }

    template <typename K>
    bool internal_erase(const K &key);

    void clear_segment(segment_index_t s);

    /** Copy "source" to *this; *this must start out empty. */
    void internal_copy(const concurrent_hash_map &source);

    template <typename I>
    void internal_copy(I first, I last);

    /** Helper used by defragment() to walk a bucket's nodes; name and body
     *  partially reconstructed from this extract. */
    void
    defrag_save_nodes(bucket *b)
    {
        auto node_ptr = static_cast<node *>(
            b->node_list.get(this->my_pool_uuid));

        while (node_ptr) {
            /* Registering the node's allocations with the defrag
             * object is elided in this extract. */
            node_ptr = static_cast<node *>(node_ptr->next.get(
                (this->my_pool_uuid)));
        }
    }
}; /* class concurrent_hash_map */
template <typename Key, typename T, typename Hash, typename KeyEqual,
          typename MutexType, typename ScopedLockType>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
                    ScopedLockType>::try_acquire_item(const_accessor *result,
                                                      node_mutex_t &mutex,
                                                      bool write)
{
    /* acquire the item */
    if (!result->try_acquire(mutex, write)) {
        for (detail::atomic_backoff backoff(true);;) {
            if (result->try_acquire(mutex, write))
                break;

            if (!backoff.bounded_pause())
                return false;
        }
    }

    return true;
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
          typename MutexType, typename ScopedLockType>
template <typename K>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
                    ScopedLockType>::internal_find(const K &key,
                                                   const_accessor *result,
                                                   bool write)
{
    assert(!result || !result->my_node);

    hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
    ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

    assert((m & (m + 1)) == 0);

    hashcode_type const h = hasher{}(key);

    persistent_node_ptr_t node;

    while (true) {
        /* get the bucket and acquire the lock */
        bucket_accessor b(
            this, h & m,
            scoped_lock_traits_type::initial_rw_state(false));
        node = get_node<false>(key, b);

        if (!node) {
            /* Element was possibly relocated; try again. */
            if (check_mask_race(h, m)) {
                b.release();
                continue;
            }

            return false;
        }

        /* No item lock requested, or the item lock was acquired. */
        if (!result ||
            try_acquire_item(
                result, node.get(this->my_pool_uuid)->mutex, write))
            break;

        /* the wait takes really long; restart the operation */
        b.release();

        std::this_thread::yield();

        m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
        ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif
    }

    if (result) {
        result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
        result->my_hash = h;
    }

    return true;
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
          typename MutexType, typename ScopedLockType>
template <typename K, typename... Args>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
                    ScopedLockType>::internal_insert(const K &key,
                                                     const_accessor *result,
                                                     bool write,
                                                     Args &&... args)
{
    assert(!result || !result->my_node);

    hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
    ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

    assert((m & (m + 1)) == 0);

    hashcode_type const h = hasher{}(key);

    persistent_node_ptr_t node;
    size_t new_size = 0;
    bool inserted = false;

    while (true) {
        /* get the bucket and acquire the lock */
        bucket_accessor b(
            this, h & m,
            scoped_lock_traits_type::initial_rw_state(true));
        node = get_node<true>(key, b);

        if (!node) {
            /* Element was possibly relocated; try again. */
            if (check_mask_race(h, m)) {
                b.release();
                continue;
            }

            /* Insert a new node. */
            new_size = insert_new_node(b.get(), node,
                                       std::forward<Args>(args)...);
            inserted = true;
        }

        /* No item lock requested, or the item lock was acquired. */
        if (!result ||
            try_acquire_item(
                result, node.get(this->my_pool_uuid)->mutex, write))
            break;

        /* the wait takes really long; restart the operation */
        b.release();

        std::this_thread::yield();

        m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
        ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif
    }

    if (result) {
        result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
        result->my_hash = h;
    }

    check_growth(m, new_size);

    return inserted;
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
          typename MutexType, typename ScopedLockType>
template <typename K>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
                    ScopedLockType>::internal_erase(const K &key)
{
    node_ptr_t n;
    hashcode_type const h = hasher{}(key);
    hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
    ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

    pool_base pop = get_pool_base();

restart : {
    /* lock scope: get the bucket */
    bucket_accessor b(this, h & m,
                      scoped_lock_traits_type::initial_rw_state(true));

search:
    node_ptr_t *p = &b->node_list;
    n = *p;

    while (n &&
           !key_equal{}(key,
                        detail::static_persistent_pool_pointer_cast<node>(
                            n)(this->my_pool_uuid)
                            ->item.first)) {
        p = &n(this->my_pool_uuid)->next;
        n = *p;
    }

    if (!n) {
        /* not found, but the mask could have changed */
        if (check_mask_race(h, m))
            goto restart;

        return false;
    } else if (!b.is_writer() &&
               !scoped_lock_traits_type::upgrade_to_writer(b)) {
        if (check_mask_race(h, m)) /* contended upgrade, check mask */
            goto restart;

        goto search;
    }

    persistent_ptr<node> del = n(this->my_pool_uuid);

    {
        /* We cannot remove this element immediately because other
         * threads might still work with it through accessors; wait
         * until the item lock can be taken. */
        const_accessor acc;
        if (!try_acquire_item(&acc, del->mutex, true)) {
            /* the wait takes really long; restart the operation */
            b.release();

            std::this_thread::yield();

            m = mask().load(std::memory_order_acquire);

            goto restart;
        }
    }

    assert(pmemobj_tx_stage() == TX_STAGE_NONE);

    auto &size_diff = this->thread_size_diff();

    /* Only one thread can delete it due to the write lock on the bucket. */
    transaction::run(pop, [&] {
        *p = del->next;
        delete_node(del);

        --size_diff;
    });

    --(this->my_size);
}

    return true;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
          typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::swap(
    concurrent_hash_map &table)
{
    internal_swap(table);
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
          typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::rehash(
    size_type sz)
{
    concurrent_hash_map_internal::check_outside_tx();

    reserve(sz);
    hashcode_type m = mask();

    /* only the last segment should be scanned for rehashing;
     * start from the first index of the last segment */
    hashcode_type b = (m + 1) >> 1;

    /* zero or power of 2 */
    assert((b & (b - 1)) == 0);

    for (; b <= m; ++b) {
        bucket *bp = get_bucket(b);

        concurrent_hash_map_internal::assert_not_locked<mutex_t,
                                                        scoped_t>(
            bp->mutex);

        if (bp->is_rehashed(std::memory_order_relaxed) == false)
            rehash_bucket<true>(bp, b);
    }
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
          typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::clear()
{
    hashcode_type m = mask();

    assert((m & (m + 1)) == 0);

#ifndef NDEBUG
    /* check consistency */
    for (segment_index_t b = 0; b <= m; ++b) {
        bucket *bp = get_bucket(b);
        concurrent_hash_map_internal::assert_not_locked<mutex_t,
                                                        scoped_t>(
            bp->mutex);
    }
#endif

    pool_base pop = get_pool_base();
    { /* transaction scope */
        transaction::manual tx(pop);

        assert(this->tls_ptr != nullptr);
        this->tls_ptr->clear();

        this->on_init_size = 0;

        segment_index_t s = segment_traits_t::segment_index_of(m);

        assert(s + 1 == this->block_table_size ||
               !segment_facade_t(this->my_table, s + 1).is_valid());

        do {
            clear_segment(s);
        } while (s-- > 0);

        mask().store(embedded_buckets - 1, std::memory_order_relaxed);
        this->my_size = 0;

        transaction::commit();
    }
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
          typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
                    ScopedLockType>::clear_segment(segment_index_t s)
{
    segment_facade_t segment(this->my_table, s);

    assert(segment.is_valid());

    size_type sz = segment.size();
    for (segment_index_t i = 0; i < sz; ++i) {
        for (node_ptr_t n = segment[i].node_list; n;
             n = segment[i].node_list) {
            segment[i].node_list = n(this->my_pool_uuid)->next;
            delete_node(n);
        }
    }

    if (s >= segment_traits_t::embedded_segments)
        segment.disable();
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
          typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
                    ScopedLockType>::internal_copy(const concurrent_hash_map
                                                       &source)
{
    auto pop = get_pool_base();

    reserve(source.size());
    internal_copy(source.begin(), source.end());
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
          typename MutexType, typename ScopedLockType>
template <typename I>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
                    ScopedLockType>::internal_copy(I first, I last)
{
    hashcode_type m = mask();

    for (; first != last; ++first) {
        hashcode_type h = hasher{}(first->first);
        bucket *b = get_bucket(h & m);

        assert(b->is_rehashed(std::memory_order_relaxed));

        detail::persistent_pool_ptr<node> p;
        insert_new_node(b, p, *first);
    }
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
          typename MutexType, typename ScopedLockType>
inline bool
operator==(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
                                     ScopedLockType> &a,
           const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
                                     ScopedLockType> &b)
{
    if (a.size() != b.size())
        return false;

    typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
                                 ScopedLockType>::const_iterator
        i(a.begin()),
        i_end(a.end());

    typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
                                 ScopedLockType>::const_iterator j,
        j_end(b.end());

    for (; i != i_end; ++i) {
        j = b.equal_range(i->first).first;

        if (j == j_end || !(i->second == j->second))
            return false;
    }

    return true;
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
          typename MutexType, typename ScopedLockType>
inline bool
operator!=(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
                                     ScopedLockType> &a,
           const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
                                     ScopedLockType> &b)
{
    return !(a == b);
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
          typename MutexType, typename ScopedLockType>
inline void
swap(concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &a,
     concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &b)
{
    a.swap(b);
}

} /* namespace obj */
} /* namespace pmem */

#endif /* PMEMOBJ_CONCURRENT_HASH_MAP_HPP */
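/*
 * Usage sketch (illustrative, not part of the original header). The include
 * path, pool path, layout name and root structure below are assumptions and
 * may differ between libpmemobj-cpp versions:
 *
 *   #include <libpmemobj++/container/concurrent_hash_map.hpp>
 *   #include <libpmemobj++/make_persistent.hpp>
 *   #include <libpmemobj++/pool.hpp>
 *   #include <libpmemobj++/transaction.hpp>
 *
 *   using map_t = pmem::obj::concurrent_hash_map<pmem::obj::p<int>,
 *                                                pmem::obj::p<int>>;
 *   struct root {
 *       pmem::obj::persistent_ptr<map_t> map;
 *   };
 *
 *   auto pop = pmem::obj::pool<root>::open("/path/to/pool", "layout");
 *   auto r = pop.root();
 *
 *   if (r->map == nullptr)
 *       pmem::obj::transaction::run(pop, [&] {
 *           r->map = pmem::obj::make_persistent<map_t>();
 *       });
 *   r->map->runtime_initialize(); // required after every application restart
 *
 *   r->map->insert(map_t::value_type(1, 42));
 *
 *   map_t::const_accessor acc;
 *   if (r->map->find(acc, 1)) {
 *       int v = acc->second.get_ro(); // read access under the item lock
 *       (void)v;
 *   }
 *   acc.release();
 */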