libstdc++
bits/hashtable.h
Go to the documentation of this file.
1// hashtable.h header -*- C++ -*-
2
3// Copyright (C) 2007-2022 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/hashtable.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
28 */
29
30#ifndef _HASHTABLE_H
31#define _HASHTABLE_H 1
32
33#pragma GCC system_header
34
37#if __cplusplus > 201402L
38# include <bits/node_handle.h>
39#endif
40
41namespace std _GLIBCXX_VISIBILITY(default)
42{
43_GLIBCXX_BEGIN_NAMESPACE_VERSION
44/// @cond undocumented
45
46 template<typename _Tp, typename _Hash>
47 using __cache_default
48 = __not_<__and_<// Do not cache for fast hasher.
49 __is_fast_hash<_Hash>,
50 // Mandatory to have erase not throwing.
51 __is_nothrow_invocable<const _Hash&, const _Tp&>>>;
52
53 // Helper to conditionally delete the default constructor.
54 // The _Hash_node_base type is used to distinguish this specialization
55 // from any other potentially-overlapping subobjects of the hashtable.
56 template<typename _Equal, typename _Hash, typename _Allocator>
57 using _Hashtable_enable_default_ctor
58 = _Enable_default_constructor<__and_<is_default_constructible<_Equal>,
59 is_default_constructible<_Hash>,
60 is_default_constructible<_Allocator>>{},
61 __detail::_Hash_node_base>;
62
63 /**
64 * Primary class template _Hashtable.
65 *
66 * @ingroup hashtable-detail
67 *
68 * @tparam _Value CopyConstructible type.
69 *
70 * @tparam _Key CopyConstructible type.
71 *
72 * @tparam _Alloc An allocator type
73 * ([lib.allocator.requirements]) whose _Alloc::value_type is
74 * _Value. As a conforming extension, we allow for
75 * _Alloc::value_type != _Value.
76 *
77 * @tparam _ExtractKey Function object that takes an object of type
78 * _Value and returns a value of type _Key.
79 *
80 * @tparam _Equal Function object that takes two objects of type k
81 * and returns a bool-like value that is true if the two objects
82 * are considered equal.
83 *
84 * @tparam _Hash The hash function. A unary function object with
85 * argument type _Key and result type size_t. Return values should
86 * be distributed over the entire range [0, numeric_limits<size_t>:::max()].
87 *
88 * @tparam _RangeHash The range-hashing function (in the terminology of
89 * Tavori and Dreizin). A binary function object whose argument
90 * types and result type are all size_t. Given arguments r and N,
91 * the return value is in the range [0, N).
92 *
93 * @tparam _Unused Not used.
94 *
95 * @tparam _RehashPolicy Policy class with three members, all of
96 * which govern the bucket count. _M_next_bkt(n) returns a bucket
97 * count no smaller than n. _M_bkt_for_elements(n) returns a
98 * bucket count appropriate for an element count of n.
99 * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
100 * current bucket count is n_bkt and the current element count is
101 * n_elt, we need to increase the bucket count for n_ins insertions.
102 * If so, returns make_pair(true, n), where n is the new bucket count. If
103 * not, returns make_pair(false, <anything>)
104 *
105 * @tparam _Traits Compile-time class with three boolean
106 * std::integral_constant members: __cache_hash_code, __constant_iterators,
107 * __unique_keys.
108 *
109 * Each _Hashtable data structure has:
110 *
111 * - _Bucket[] _M_buckets
112 * - _Hash_node_base _M_before_begin
113 * - size_type _M_bucket_count
114 * - size_type _M_element_count
115 *
116 * with _Bucket being _Hash_node_base* and _Hash_node containing:
117 *
118 * - _Hash_node* _M_next
119 * - Tp _M_value
120 * - size_t _M_hash_code if cache_hash_code is true
121 *
122 * In terms of Standard containers the hashtable is like the aggregation of:
123 *
124 * - std::forward_list<_Node> containing the elements
125 * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
126 *
127 * The non-empty buckets contain the node before the first node in the
128 * bucket. This design makes it possible to implement something like a
129 * std::forward_list::insert_after on container insertion and
130 * std::forward_list::erase_after on container erase
131 * calls. _M_before_begin is equivalent to
132 * std::forward_list::before_begin. Empty buckets contain
133 * nullptr. Note that one of the non-empty buckets contains
134 * &_M_before_begin which is not a dereferenceable node so the
135 * node pointer in a bucket shall never be dereferenced, only its
136 * next node can be.
137 *
138 * Walking through a bucket's nodes requires a check on the hash code to
139 * see if each node is still in the bucket. Such a design assumes a
140 * quite efficient hash functor and is one of the reasons it is
141 * highly advisable to set __cache_hash_code to true.
142 *
143 * The container iterators are simply built from nodes. This way
144 * incrementing the iterator is perfectly efficient independent of
145 * how many empty buckets there are in the container.
146 *
147 * On insert we compute the element's hash code and use it to find the
148 * bucket index. If the element must be inserted in an empty bucket
149 * we add it at the beginning of the singly linked list and make the
150 * bucket point to _M_before_begin. The bucket that used to point to
151 * _M_before_begin, if any, is updated to point to its new before
152 * begin node.
153 *
154 * On erase, the simple iterator design requires using the hash
155 * functor to get the index of the bucket to update. For this
156 * reason, when __cache_hash_code is set to false the hash functor must
157 * not throw and this is enforced by a static assertion.
158 *
159 * Functionality is implemented by decomposition into base classes,
160 * where the derived _Hashtable class is used in _Map_base,
161 * _Insert, _Rehash_base, and _Equality base classes to access the
162 * "this" pointer. _Hashtable_base is used in the base classes as a
163 * non-recursive, fully-completed-type so that detailed nested type
164 * information, such as iterator type and node type, can be
165 * used. This is similar to the "Curiously Recurring Template
166 * Pattern" (CRTP) technique, but uses a reconstructed, not
167 * explicitly passed, template pattern.
168 *
169 * Base class templates are:
170 * - __detail::_Hashtable_base
171 * - __detail::_Map_base
172 * - __detail::_Insert
173 * - __detail::_Rehash_base
174 * - __detail::_Equality
175 */
176 template<typename _Key, typename _Value, typename _Alloc,
177 typename _ExtractKey, typename _Equal,
178 typename _Hash, typename _RangeHash, typename _Unused,
179 typename _RehashPolicy, typename _Traits>
180 class _Hashtable
181 : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
182 _Hash, _RangeHash, _Unused, _Traits>,
183 public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
184 _Hash, _RangeHash, _Unused,
185 _RehashPolicy, _Traits>,
186 public __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey, _Equal,
187 _Hash, _RangeHash, _Unused,
188 _RehashPolicy, _Traits>,
189 public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
190 _Hash, _RangeHash, _Unused,
191 _RehashPolicy, _Traits>,
192 public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal,
193 _Hash, _RangeHash, _Unused,
194 _RehashPolicy, _Traits>,
195 private __detail::_Hashtable_alloc<
196 __alloc_rebind<_Alloc,
197 __detail::_Hash_node<_Value,
198 _Traits::__hash_cached::value>>>,
199 private _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>
200 {
201 static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
202 "unordered container must have a non-const, non-volatile value_type");
203#if __cplusplus > 201703L || defined __STRICT_ANSI__
204 static_assert(is_same<typename _Alloc::value_type, _Value>{},
205 "unordered container must have the same value_type as its allocator");
206#endif
207
208 using __traits_type = _Traits;
209 using __hash_cached = typename __traits_type::__hash_cached;
210 using __constant_iterators = typename __traits_type::__constant_iterators;
211 using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
212 using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
213
214 using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
215
216 using __node_value_type =
217 __detail::_Hash_node_value<_Value, __hash_cached::value>;
218 using __node_ptr = typename __hashtable_alloc::__node_ptr;
219 using __value_alloc_traits =
220 typename __hashtable_alloc::__value_alloc_traits;
221 using __node_alloc_traits =
222 typename __hashtable_alloc::__node_alloc_traits;
223 using __node_base = typename __hashtable_alloc::__node_base;
224 using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
225 using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
226
227 using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
228 _Equal, _Hash,
229 _RangeHash, _Unused,
230 _RehashPolicy, _Traits>;
231 using __enable_default_ctor
232 = _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>;
233
234 public:
235 typedef _Key key_type;
236 typedef _Value value_type;
237 typedef _Alloc allocator_type;
238 typedef _Equal key_equal;
239
240 // mapped_type, if present, comes from _Map_base.
241 // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
242 typedef typename __value_alloc_traits::pointer pointer;
243 typedef typename __value_alloc_traits::const_pointer const_pointer;
244 typedef value_type& reference;
245 typedef const value_type& const_reference;
246
247 using iterator = typename __insert_base::iterator;
248
249 using const_iterator = typename __insert_base::const_iterator;
250
251 using local_iterator = __detail::_Local_iterator<key_type, _Value,
252 _ExtractKey, _Hash, _RangeHash, _Unused,
253 __constant_iterators::value,
254 __hash_cached::value>;
255
256 using const_local_iterator = __detail::_Local_const_iterator<
257 key_type, _Value,
258 _ExtractKey, _Hash, _RangeHash, _Unused,
259 __constant_iterators::value, __hash_cached::value>;
260
261 private:
262 using __rehash_type = _RehashPolicy;
263 using __rehash_state = typename __rehash_type::_State;
264
265 using __unique_keys = typename __traits_type::__unique_keys;
266
267 using __hashtable_base = __detail::
268 _Hashtable_base<_Key, _Value, _ExtractKey,
269 _Equal, _Hash, _RangeHash, _Unused, _Traits>;
270
271 using __hash_code_base = typename __hashtable_base::__hash_code_base;
272 using __hash_code = typename __hashtable_base::__hash_code;
273 using __ireturn_type = typename __insert_base::__ireturn_type;
274
275 using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
276 _Equal, _Hash, _RangeHash, _Unused,
277 _RehashPolicy, _Traits>;
278
279 using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
280 _ExtractKey, _Equal,
281 _Hash, _RangeHash, _Unused,
282 _RehashPolicy, _Traits>;
283
284 using __eq_base = __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey,
285 _Equal, _Hash, _RangeHash, _Unused,
286 _RehashPolicy, _Traits>;
287
288 using __reuse_or_alloc_node_gen_t =
289 __detail::_ReuseOrAllocNode<__node_alloc_type>;
290 using __alloc_node_gen_t =
291 __detail::_AllocNode<__node_alloc_type>;
292 using __node_builder_t =
293 __detail::_NodeBuilder<_ExtractKey>;
294
295 // Simple RAII type for managing a node containing an element
296 struct _Scoped_node
297 {
298 // Take ownership of a node with a constructed element.
299 _Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
300 : _M_h(__h), _M_node(__n) { }
301
302 // Allocate a node and construct an element within it.
303 template<typename... _Args>
304 _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
305 : _M_h(__h),
306 _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
307 { }
308
309 // Destroy element and deallocate node.
310 ~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };
311
312 _Scoped_node(const _Scoped_node&) = delete;
313 _Scoped_node& operator=(const _Scoped_node&) = delete;
314
315 __hashtable_alloc* _M_h;
316 __node_ptr _M_node;
317 };
318
319 template<typename _Ht>
320 static constexpr
321 __conditional_t<std::is_lvalue_reference<_Ht>::value,
322 const value_type&, value_type&&>
323 __fwd_value_for(value_type& __val) noexcept
324 { return std::move(__val); }
325
326 // Compile-time diagnostics.
327
328 // _Hash_code_base has everything protected, so use this derived type to
329 // access it.
330 struct __hash_code_base_access : __hash_code_base
331 { using __hash_code_base::_M_bucket_index; };
332
333 // To get bucket index we need _RangeHash not to throw.
334 static_assert(is_nothrow_default_constructible<_RangeHash>::value,
335 "Functor used to map hash code to bucket index"
336 " must be nothrow default constructible");
337 static_assert(noexcept(
338 std::declval<const _RangeHash&>()((std::size_t)0, (std::size_t)0)),
339 "Functor used to map hash code to bucket index must be"
340 " noexcept");
341
342 // To compute bucket index we also need _ExtratKey not to throw.
343 static_assert(is_nothrow_default_constructible<_ExtractKey>::value,
344 "_ExtractKey must be nothrow default constructible");
345 static_assert(noexcept(
346 std::declval<const _ExtractKey&>()(std::declval<_Value>())),
347 "_ExtractKey functor must be noexcept invocable");
348
349 template<typename _Keya, typename _Valuea, typename _Alloca,
350 typename _ExtractKeya, typename _Equala,
351 typename _Hasha, typename _RangeHasha, typename _Unuseda,
352 typename _RehashPolicya, typename _Traitsa,
353 bool _Unique_keysa>
354 friend struct __detail::_Map_base;
355
356 template<typename _Keya, typename _Valuea, typename _Alloca,
357 typename _ExtractKeya, typename _Equala,
358 typename _Hasha, typename _RangeHasha, typename _Unuseda,
359 typename _RehashPolicya, typename _Traitsa>
360 friend struct __detail::_Insert_base;
361
362 template<typename _Keya, typename _Valuea, typename _Alloca,
363 typename _ExtractKeya, typename _Equala,
364 typename _Hasha, typename _RangeHasha, typename _Unuseda,
365 typename _RehashPolicya, typename _Traitsa,
366 bool _Constant_iteratorsa>
367 friend struct __detail::_Insert;
368
369 template<typename _Keya, typename _Valuea, typename _Alloca,
370 typename _ExtractKeya, typename _Equala,
371 typename _Hasha, typename _RangeHasha, typename _Unuseda,
372 typename _RehashPolicya, typename _Traitsa,
373 bool _Unique_keysa>
374 friend struct __detail::_Equality;
375
376 public:
377 using size_type = typename __hashtable_base::size_type;
378 using difference_type = typename __hashtable_base::difference_type;
379
380#if __cplusplus > 201402L
381 using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
382 using insert_return_type = _Node_insert_return<iterator, node_type>;
383#endif
384
385 private:
386 __buckets_ptr _M_buckets = &_M_single_bucket;
387 size_type _M_bucket_count = 1;
388 __node_base _M_before_begin;
389 size_type _M_element_count = 0;
390 _RehashPolicy _M_rehash_policy;
391
392 // A single bucket used when only need for 1 bucket. Especially
393 // interesting in move semantic to leave hashtable with only 1 bucket
394 // which is not allocated so that we can have those operations noexcept
395 // qualified.
396 // Note that we can't leave hashtable with 0 bucket without adding
397 // numerous checks in the code to avoid 0 modulus.
398 __node_base_ptr _M_single_bucket = nullptr;
399
400 void
401 _M_update_bbegin()
402 {
403 if (_M_begin())
404 _M_buckets[_M_bucket_index(*_M_begin())] = &_M_before_begin;
405 }
406
407 void
408 _M_update_bbegin(__node_ptr __n)
409 {
410 _M_before_begin._M_nxt = __n;
411 _M_update_bbegin();
412 }
413
414 bool
415 _M_uses_single_bucket(__buckets_ptr __bkts) const
416 { return __builtin_expect(__bkts == &_M_single_bucket, false); }
417
418 bool
419 _M_uses_single_bucket() const
420 { return _M_uses_single_bucket(_M_buckets); }
421
422 static constexpr size_t
423 __small_size_threshold() noexcept
424 {
425 return
426 __detail::_Hashtable_hash_traits<_Hash>::__small_size_threshold();
427 }
428
429 __hashtable_alloc&
430 _M_base_alloc() { return *this; }
431
432 __buckets_ptr
433 _M_allocate_buckets(size_type __bkt_count)
434 {
435 if (__builtin_expect(__bkt_count == 1, false))
436 {
437 _M_single_bucket = nullptr;
438 return &_M_single_bucket;
439 }
440
441 return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
442 }
443
444 void
445 _M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
446 {
447 if (_M_uses_single_bucket(__bkts))
448 return;
449
450 __hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
451 }
452
453 void
454 _M_deallocate_buckets()
455 { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }
456
457 // Gets bucket begin, deals with the fact that non-empty buckets contain
458 // their before begin node.
459 __node_ptr
460 _M_bucket_begin(size_type __bkt) const;
461
462 __node_ptr
463 _M_begin() const
464 { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
465
466 // Assign *this using another _Hashtable instance. Whether elements
467 // are copied or moved depends on the _Ht reference.
468 template<typename _Ht>
469 void
470 _M_assign_elements(_Ht&&);
471
472 template<typename _Ht, typename _NodeGenerator>
473 void
474 _M_assign(_Ht&&, const _NodeGenerator&);
475
476 void
477 _M_move_assign(_Hashtable&&, true_type);
478
479 void
480 _M_move_assign(_Hashtable&&, false_type);
481
482 void
483 _M_reset() noexcept;
484
485 _Hashtable(const _Hash& __h, const _Equal& __eq,
486 const allocator_type& __a)
487 : __hashtable_base(__h, __eq),
488 __hashtable_alloc(__node_alloc_type(__a)),
489 __enable_default_ctor(_Enable_default_constructor_tag{})
490 { }
491
492 template<bool _No_realloc = true>
493 static constexpr bool
494 _S_nothrow_move()
495 {
496#if __cplusplus <= 201402L
497 return __and_<__bool_constant<_No_realloc>,
498 is_nothrow_copy_constructible<_Hash>,
499 is_nothrow_copy_constructible<_Equal>>::value;
500#else
501 if constexpr (_No_realloc)
502 if constexpr (is_nothrow_copy_constructible<_Hash>())
503 return is_nothrow_copy_constructible<_Equal>();
504 return false;
505#endif
506 }
507
508 _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
509 true_type /* alloc always equal */)
510 noexcept(_S_nothrow_move());
511
512 _Hashtable(_Hashtable&&, __node_alloc_type&&,
513 false_type /* alloc always equal */);
514
515 template<typename _InputIterator>
516 _Hashtable(_InputIterator __first, _InputIterator __last,
517 size_type __bkt_count_hint,
518 const _Hash&, const _Equal&, const allocator_type&,
519 true_type __uks);
520
521 template<typename _InputIterator>
522 _Hashtable(_InputIterator __first, _InputIterator __last,
523 size_type __bkt_count_hint,
524 const _Hash&, const _Equal&, const allocator_type&,
525 false_type __uks);
526
527 public:
528 // Constructor, destructor, assignment, swap
529 _Hashtable() = default;
530
531 _Hashtable(const _Hashtable&);
532
533 _Hashtable(const _Hashtable&, const allocator_type&);
534
535 explicit
536 _Hashtable(size_type __bkt_count_hint,
537 const _Hash& __hf = _Hash(),
538 const key_equal& __eql = key_equal(),
539 const allocator_type& __a = allocator_type());
540
541 // Use delegating constructors.
542 _Hashtable(_Hashtable&& __ht)
543 noexcept(_S_nothrow_move())
544 : _Hashtable(std::move(__ht), std::move(__ht._M_node_allocator()),
545 true_type{})
546 { }
547
548 _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
549 noexcept(_S_nothrow_move<__node_alloc_traits::_S_always_equal()>())
550 : _Hashtable(std::move(__ht), __node_alloc_type(__a),
551 typename __node_alloc_traits::is_always_equal{})
552 { }
553
554 explicit
555 _Hashtable(const allocator_type& __a)
556 : __hashtable_alloc(__node_alloc_type(__a)),
557 __enable_default_ctor(_Enable_default_constructor_tag{})
558 { }
559
560 template<typename _InputIterator>
561 _Hashtable(_InputIterator __f, _InputIterator __l,
562 size_type __bkt_count_hint = 0,
563 const _Hash& __hf = _Hash(),
564 const key_equal& __eql = key_equal(),
565 const allocator_type& __a = allocator_type())
566 : _Hashtable(__f, __l, __bkt_count_hint, __hf, __eql, __a,
567 __unique_keys{})
568 { }
569
570 _Hashtable(initializer_list<value_type> __l,
571 size_type __bkt_count_hint = 0,
572 const _Hash& __hf = _Hash(),
573 const key_equal& __eql = key_equal(),
574 const allocator_type& __a = allocator_type())
575 : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
576 __hf, __eql, __a, __unique_keys{})
577 { }
578
579 _Hashtable&
580 operator=(const _Hashtable& __ht);
581
582 _Hashtable&
583 operator=(_Hashtable&& __ht)
584 noexcept(__node_alloc_traits::_S_nothrow_move()
585 && is_nothrow_move_assignable<_Hash>::value
586 && is_nothrow_move_assignable<_Equal>::value)
587 {
588 constexpr bool __move_storage =
589 __node_alloc_traits::_S_propagate_on_move_assign()
590 || __node_alloc_traits::_S_always_equal();
591 _M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
592 return *this;
593 }
594
595 _Hashtable&
596 operator=(initializer_list<value_type> __l)
597 {
598 __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
599 _M_before_begin._M_nxt = nullptr;
600 clear();
601
602 // We consider that all elements of __l are going to be inserted.
603 auto __l_bkt_count = _M_rehash_policy._M_bkt_for_elements(__l.size());
604
605 // Do not shrink to keep potential user reservation.
606 if (_M_bucket_count < __l_bkt_count)
607 rehash(__l_bkt_count);
608
609 this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys{});
610 return *this;
611 }
612
613 ~_Hashtable() noexcept;
614
615 void
616 swap(_Hashtable&)
617 noexcept(__and_<__is_nothrow_swappable<_Hash>,
618 __is_nothrow_swappable<_Equal>>::value);
619
620 // Basic container operations
621 iterator
622 begin() noexcept
623 { return iterator(_M_begin()); }
624
625 const_iterator
626 begin() const noexcept
627 { return const_iterator(_M_begin()); }
628
629 iterator
630 end() noexcept
631 { return iterator(nullptr); }
632
633 const_iterator
634 end() const noexcept
635 { return const_iterator(nullptr); }
636
637 const_iterator
638 cbegin() const noexcept
639 { return const_iterator(_M_begin()); }
640
641 const_iterator
642 cend() const noexcept
643 { return const_iterator(nullptr); }
644
645 size_type
646 size() const noexcept
647 { return _M_element_count; }
648
649 _GLIBCXX_NODISCARD bool
650 empty() const noexcept
651 { return size() == 0; }
652
653 allocator_type
654 get_allocator() const noexcept
655 { return allocator_type(this->_M_node_allocator()); }
656
657 size_type
658 max_size() const noexcept
659 { return __node_alloc_traits::max_size(this->_M_node_allocator()); }
660
661 // Observers
662 key_equal
663 key_eq() const
664 { return this->_M_eq(); }
665
666 // hash_function, if present, comes from _Hash_code_base.
667
668 // Bucket operations
669 size_type
670 bucket_count() const noexcept
671 { return _M_bucket_count; }
672
673 size_type
674 max_bucket_count() const noexcept
675 { return max_size(); }
676
677 size_type
678 bucket_size(size_type __bkt) const
679 { return std::distance(begin(__bkt), end(__bkt)); }
680
681 size_type
682 bucket(const key_type& __k) const
683 { return _M_bucket_index(this->_M_hash_code(__k)); }
684
685 local_iterator
686 begin(size_type __bkt)
687 {
688 return local_iterator(*this, _M_bucket_begin(__bkt),
689 __bkt, _M_bucket_count);
690 }
691
692 local_iterator
693 end(size_type __bkt)
694 { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
695
696 const_local_iterator
697 begin(size_type __bkt) const
698 {
699 return const_local_iterator(*this, _M_bucket_begin(__bkt),
700 __bkt, _M_bucket_count);
701 }
702
703 const_local_iterator
704 end(size_type __bkt) const
705 { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
706
707 // DR 691.
708 const_local_iterator
709 cbegin(size_type __bkt) const
710 {
711 return const_local_iterator(*this, _M_bucket_begin(__bkt),
712 __bkt, _M_bucket_count);
713 }
714
715 const_local_iterator
716 cend(size_type __bkt) const
717 { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
718
719 float
720 load_factor() const noexcept
721 {
722 return static_cast<float>(size()) / static_cast<float>(bucket_count());
723 }
724
725 // max_load_factor, if present, comes from _Rehash_base.
726
727 // Generalization of max_load_factor. Extension, not found in
728 // TR1. Only useful if _RehashPolicy is something other than
729 // the default.
730 const _RehashPolicy&
731 __rehash_policy() const
732 { return _M_rehash_policy; }
733
734 void
735 __rehash_policy(const _RehashPolicy& __pol)
736 { _M_rehash_policy = __pol; }
737
738 // Lookup.
739 iterator
740 find(const key_type& __k);
741
742 const_iterator
743 find(const key_type& __k) const;
744
745 size_type
746 count(const key_type& __k) const;
747
749 equal_range(const key_type& __k);
750
752 equal_range(const key_type& __k) const;
753
754#if __cplusplus >= 202002L
755#define __cpp_lib_generic_unordered_lookup 201811L
756
757 template<typename _Kt,
758 typename = __has_is_transparent_t<_Hash, _Kt>,
759 typename = __has_is_transparent_t<_Equal, _Kt>>
760 iterator
761 _M_find_tr(const _Kt& __k);
762
763 template<typename _Kt,
764 typename = __has_is_transparent_t<_Hash, _Kt>,
765 typename = __has_is_transparent_t<_Equal, _Kt>>
766 const_iterator
767 _M_find_tr(const _Kt& __k) const;
768
769 template<typename _Kt,
770 typename = __has_is_transparent_t<_Hash, _Kt>,
771 typename = __has_is_transparent_t<_Equal, _Kt>>
772 size_type
773 _M_count_tr(const _Kt& __k) const;
774
775 template<typename _Kt,
776 typename = __has_is_transparent_t<_Hash, _Kt>,
777 typename = __has_is_transparent_t<_Equal, _Kt>>
778 pair<iterator, iterator>
779 _M_equal_range_tr(const _Kt& __k);
780
781 template<typename _Kt,
782 typename = __has_is_transparent_t<_Hash, _Kt>,
783 typename = __has_is_transparent_t<_Equal, _Kt>>
784 pair<const_iterator, const_iterator>
785 _M_equal_range_tr(const _Kt& __k) const;
786#endif // C++20
787
788 private:
789 // Bucket index computation helpers.
790 size_type
791 _M_bucket_index(const __node_value_type& __n) const noexcept
792 { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
793
794 size_type
795 _M_bucket_index(__hash_code __c) const
796 { return __hash_code_base::_M_bucket_index(__c, _M_bucket_count); }
797
798 __node_base_ptr
799 _M_find_before_node(const key_type&);
800
801 // Find and insert helper functions and types
802 // Find the node before the one matching the criteria.
803 __node_base_ptr
804 _M_find_before_node(size_type, const key_type&, __hash_code) const;
805
806 template<typename _Kt>
807 __node_base_ptr
808 _M_find_before_node_tr(size_type, const _Kt&, __hash_code) const;
809
810 __node_ptr
811 _M_find_node(size_type __bkt, const key_type& __key,
812 __hash_code __c) const
813 {
814 __node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
815 if (__before_n)
816 return static_cast<__node_ptr>(__before_n->_M_nxt);
817 return nullptr;
818 }
819
820 template<typename _Kt>
821 __node_ptr
822 _M_find_node_tr(size_type __bkt, const _Kt& __key,
823 __hash_code __c) const
824 {
825 auto __before_n = _M_find_before_node_tr(__bkt, __key, __c);
826 if (__before_n)
827 return static_cast<__node_ptr>(__before_n->_M_nxt);
828 return nullptr;
829 }
830
831 // Insert a node at the beginning of a bucket.
832 void
833 _M_insert_bucket_begin(size_type, __node_ptr);
834
835 // Remove the bucket first node
836 void
837 _M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
838 size_type __next_bkt);
839
840 // Get the node before __n in the bucket __bkt
841 __node_base_ptr
842 _M_get_previous_node(size_type __bkt, __node_ptr __n);
843
844 pair<const_iterator, __hash_code>
845 _M_compute_hash_code(const_iterator __hint, const key_type& __k) const;
846
847 // Insert node __n with hash code __code, in bucket __bkt if no
848 // rehash (assumes no element with same key already present).
849 // Takes ownership of __n if insertion succeeds, throws otherwise.
850 iterator
851 _M_insert_unique_node(size_type __bkt, __hash_code,
852 __node_ptr __n, size_type __n_elt = 1);
853
854 // Insert node __n with key __k and hash code __code.
855 // Takes ownership of __n if insertion succeeds, throws otherwise.
856 iterator
857 _M_insert_multi_node(__node_ptr __hint,
858 __hash_code __code, __node_ptr __n);
859
860 template<typename... _Args>
862 _M_emplace(true_type __uks, _Args&&... __args);
863
864 template<typename... _Args>
865 iterator
866 _M_emplace(false_type __uks, _Args&&... __args)
867 { return _M_emplace(cend(), __uks, std::forward<_Args>(__args)...); }
868
869 // Emplace with hint, useless when keys are unique.
870 template<typename... _Args>
871 iterator
872 _M_emplace(const_iterator, true_type __uks, _Args&&... __args)
873 { return _M_emplace(__uks, std::forward<_Args>(__args)...).first; }
874
875 template<typename... _Args>
876 iterator
877 _M_emplace(const_iterator, false_type __uks, _Args&&... __args);
878
879 template<typename _Kt, typename _Arg, typename _NodeGenerator>
881 _M_insert_unique(_Kt&&, _Arg&&, const _NodeGenerator&);
882
883 template<typename _Kt>
884 static __conditional_t<
885 __and_<__is_nothrow_invocable<_Hash&, const key_type&>,
886 __not_<__is_nothrow_invocable<_Hash&, _Kt>>>::value,
887 key_type, _Kt&&>
888 _S_forward_key(_Kt&& __k)
889 { return std::forward<_Kt>(__k); }
890
891 static const key_type&
892 _S_forward_key(const key_type& __k)
893 { return __k; }
894
895 static key_type&&
896 _S_forward_key(key_type&& __k)
897 { return std::move(__k); }
898
899 template<typename _Arg, typename _NodeGenerator>
901 _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
902 true_type /* __uks */)
903 {
904 return _M_insert_unique(
905 _S_forward_key(_ExtractKey{}(std::forward<_Arg>(__arg))),
906 std::forward<_Arg>(__arg), __node_gen);
907 }
908
909 template<typename _Arg, typename _NodeGenerator>
910 iterator
911 _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
912 false_type __uks)
913 {
914 return _M_insert(cend(), std::forward<_Arg>(__arg), __node_gen,
915 __uks);
916 }
917
918 // Insert with hint, not used when keys are unique.
919 template<typename _Arg, typename _NodeGenerator>
920 iterator
921 _M_insert(const_iterator, _Arg&& __arg,
922 const _NodeGenerator& __node_gen, true_type __uks)
923 {
924 return
925 _M_insert(std::forward<_Arg>(__arg), __node_gen, __uks).first;
926 }
927
928 // Insert with hint when keys are not unique.
929 template<typename _Arg, typename _NodeGenerator>
930 iterator
931 _M_insert(const_iterator, _Arg&&,
932 const _NodeGenerator&, false_type __uks);
933
934 size_type
935 _M_erase(true_type __uks, const key_type&);
936
937 size_type
938 _M_erase(false_type __uks, const key_type&);
939
940 iterator
941 _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
942
943 public:
944 // Emplace
945 template<typename... _Args>
946 __ireturn_type
947 emplace(_Args&&... __args)
948 { return _M_emplace(__unique_keys{}, std::forward<_Args>(__args)...); }
949
950 template<typename... _Args>
951 iterator
952 emplace_hint(const_iterator __hint, _Args&&... __args)
953 {
954 return _M_emplace(__hint, __unique_keys{},
955 std::forward<_Args>(__args)...);
956 }
957
958 // Insert member functions via inheritance.
959
960 // Erase
961 iterator
962 erase(const_iterator);
963
964 // LWG 2059.
965 iterator
966 erase(iterator __it)
967 { return erase(const_iterator(__it)); }
968
969 size_type
970 erase(const key_type& __k)
971 { return _M_erase(__unique_keys{}, __k); }
972
973 iterator
974 erase(const_iterator, const_iterator);
975
976 void
977 clear() noexcept;
978
979 // Set number of buckets keeping it appropriate for container's number
980 // of elements.
981 void rehash(size_type __bkt_count);
982
983 // DR 1189.
984 // reserve, if present, comes from _Rehash_base.
985
986#if __cplusplus > 201402L
987 /// Re-insert an extracted node into a container with unique keys.
988 insert_return_type
989 _M_reinsert_node(node_type&& __nh)
990 {
991 insert_return_type __ret;
992 if (__nh.empty())
993 __ret.position = end();
994 else
995 {
996 __glibcxx_assert(get_allocator() == __nh.get_allocator());
997
998 const key_type& __k = __nh._M_key();
999 __hash_code __code = this->_M_hash_code(__k);
1000 size_type __bkt = _M_bucket_index(__code);
1001 if (__node_ptr __n = _M_find_node(__bkt, __k, __code))
1002 {
1003 __ret.node = std::move(__nh);
1004 __ret.position = iterator(__n);
1005 __ret.inserted = false;
1006 }
1007 else
1008 {
1009 __ret.position
1010 = _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
1011 __nh._M_ptr = nullptr;
1012 __ret.inserted = true;
1013 }
1014 }
1015 return __ret;
1016 }
1017
1018 /// Re-insert an extracted node into a container with equivalent keys.
1019 iterator
1020 _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
1021 {
1022 if (__nh.empty())
1023 return end();
1024
1025 __glibcxx_assert(get_allocator() == __nh.get_allocator());
1026
1027 const key_type& __k = __nh._M_key();
1028 auto __code = this->_M_hash_code(__k);
1029 auto __ret
1030 = _M_insert_multi_node(__hint._M_cur, __code, __nh._M_ptr);
1031 __nh._M_ptr = nullptr;
1032 return __ret;
1033 }
1034
1035 private:
1036 node_type
1037 _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
1038 {
1039 __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
1040 if (__prev_n == _M_buckets[__bkt])
1041 _M_remove_bucket_begin(__bkt, __n->_M_next(),
1042 __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
1043 else if (__n->_M_nxt)
1044 {
1045 size_type __next_bkt = _M_bucket_index(*__n->_M_next());
1046 if (__next_bkt != __bkt)
1047 _M_buckets[__next_bkt] = __prev_n;
1048 }
1049
1050 __prev_n->_M_nxt = __n->_M_nxt;
1051 __n->_M_nxt = nullptr;
1052 --_M_element_count;
1053 return { __n, this->_M_node_allocator() };
1054 }
1055
1056 public:
1057 // Extract a node.
1058 node_type
1059 extract(const_iterator __pos)
1060 {
1061 size_t __bkt = _M_bucket_index(*__pos._M_cur);
1062 return _M_extract_node(__bkt,
1063 _M_get_previous_node(__bkt, __pos._M_cur));
1064 }
1065
1066 /// Extract a node.
1067 node_type
1068 extract(const _Key& __k)
1069 {
1070 node_type __nh;
1071 __hash_code __code = this->_M_hash_code(__k);
1072 std::size_t __bkt = _M_bucket_index(__code);
1073 if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
1074 __nh = _M_extract_node(__bkt, __prev_node);
1075 return __nh;
1076 }
1077
1078 /// Merge from a compatible container into one with unique keys.
1079 template<typename _Compatible_Hashtable>
1080 void
1081 _M_merge_unique(_Compatible_Hashtable& __src)
1082 {
1083 static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1084 node_type>, "Node types are compatible");
1085 __glibcxx_assert(get_allocator() == __src.get_allocator());
1086
1087 auto __n_elt = __src.size();
1088 for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
1089 {
1090 auto __pos = __i++;
1091 const key_type& __k = _ExtractKey{}(*__pos);
1092 __hash_code __code
1093 = this->_M_hash_code(__src.hash_function(), *__pos._M_cur);
1094 size_type __bkt = _M_bucket_index(__code);
1095 if (_M_find_node(__bkt, __k, __code) == nullptr)
1096 {
1097 auto __nh = __src.extract(__pos);
1098 _M_insert_unique_node(__bkt, __code, __nh._M_ptr, __n_elt);
1099 __nh._M_ptr = nullptr;
1100 __n_elt = 1;
1101 }
1102 else if (__n_elt != 1)
1103 --__n_elt;
1104 }
1105 }
1106
1107 /// Merge from a compatible container into one with equivalent keys.
1108 template<typename _Compatible_Hashtable>
1109 void
1110 _M_merge_multi(_Compatible_Hashtable& __src)
1111 {
1112 static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1113 node_type>, "Node types are compatible");
1114 __glibcxx_assert(get_allocator() == __src.get_allocator());
1115
1116 __node_ptr __hint = nullptr;
1117 this->reserve(size() + __src.size());
1118 for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
1119 {
1120 auto __pos = __i++;
1121 __hash_code __code
1122 = this->_M_hash_code(__src.hash_function(), *__pos._M_cur);
1123 auto __nh = __src.extract(__pos);
1124 __hint = _M_insert_multi_node(__hint, __code, __nh._M_ptr)._M_cur;
1125 __nh._M_ptr = nullptr;
1126 }
1127 }
1128#endif // C++17
1129
1130 private:
1131 // Helper rehash method used when keys are unique.
1132 void _M_rehash_aux(size_type __bkt_count, true_type __uks);
1133
1134 // Helper rehash method used when keys can be non-unique.
1135 void _M_rehash_aux(size_type __bkt_count, false_type __uks);
1136
1137 // Unconditionally change size of bucket array to n, restore
1138 // hash policy state to __state on exception.
1139 void _M_rehash(size_type __bkt_count, const __rehash_state& __state);
1140 };
1141
1142 // Definitions of class template _Hashtable's out-of-line member functions.
1143 template<typename _Key, typename _Value, typename _Alloc,
1144 typename _ExtractKey, typename _Equal,
1145 typename _Hash, typename _RangeHash, typename _Unused,
1146 typename _RehashPolicy, typename _Traits>
1147 auto
1148 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1149 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1150 _M_bucket_begin(size_type __bkt) const
1151 -> __node_ptr
1152 {
1153 __node_base_ptr __n = _M_buckets[__bkt];
1154 return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
1155 }
1156
1157 template<typename _Key, typename _Value, typename _Alloc,
1158 typename _ExtractKey, typename _Equal,
1159 typename _Hash, typename _RangeHash, typename _Unused,
1160 typename _RehashPolicy, typename _Traits>
1161 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1162 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1163 _Hashtable(size_type __bkt_count_hint,
1164 const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
1165 : _Hashtable(__h, __eq, __a)
1166 {
1167 auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
1168 if (__bkt_count > _M_bucket_count)
1169 {
1170 _M_buckets = _M_allocate_buckets(__bkt_count);
1171 _M_bucket_count = __bkt_count;
1172 }
1173 }
1174
1175 template<typename _Key, typename _Value, typename _Alloc,
1176 typename _ExtractKey, typename _Equal,
1177 typename _Hash, typename _RangeHash, typename _Unused,
1178 typename _RehashPolicy, typename _Traits>
1179 template<typename _InputIterator>
1180 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1181 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1182 _Hashtable(_InputIterator __f, _InputIterator __l,
1183 size_type __bkt_count_hint,
1184 const _Hash& __h, const _Equal& __eq,
1185 const allocator_type& __a, true_type /* __uks */)
1186 : _Hashtable(__bkt_count_hint, __h, __eq, __a)
1187 {
1188 for (; __f != __l; ++__f)
1189 this->insert(*__f);
1190 }
1191
1192 template<typename _Key, typename _Value, typename _Alloc,
1193 typename _ExtractKey, typename _Equal,
1194 typename _Hash, typename _RangeHash, typename _Unused,
1195 typename _RehashPolicy, typename _Traits>
1196 template<typename _InputIterator>
1197 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1198 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1199 _Hashtable(_InputIterator __f, _InputIterator __l,
1200 size_type __bkt_count_hint,
1201 const _Hash& __h, const _Equal& __eq,
1202 const allocator_type& __a, false_type /* __uks */)
1203 : _Hashtable(__h, __eq, __a)
1204 {
1205 auto __nb_elems = __detail::__distance_fw(__f, __l);
1206 auto __bkt_count =
1207 _M_rehash_policy._M_next_bkt(
1208 std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
1209 __bkt_count_hint));
1210
1211 if (__bkt_count > _M_bucket_count)
1212 {
1213 _M_buckets = _M_allocate_buckets(__bkt_count);
1214 _M_bucket_count = __bkt_count;
1215 }
1216
1217 for (; __f != __l; ++__f)
1218 this->insert(*__f);
1219 }
1220
1221 template<typename _Key, typename _Value, typename _Alloc,
1222 typename _ExtractKey, typename _Equal,
1223 typename _Hash, typename _RangeHash, typename _Unused,
1224 typename _RehashPolicy, typename _Traits>
1225 auto
1226 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1227 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1228 operator=(const _Hashtable& __ht)
1229 -> _Hashtable&
1230 {
1231 if (&__ht == this)
1232 return *this;
1233
1234 if (__node_alloc_traits::_S_propagate_on_copy_assign())
1235 {
1236 auto& __this_alloc = this->_M_node_allocator();
1237 auto& __that_alloc = __ht._M_node_allocator();
1238 if (!__node_alloc_traits::_S_always_equal()
1239 && __this_alloc != __that_alloc)
1240 {
1241 // Replacement allocator cannot free existing storage.
1242 this->_M_deallocate_nodes(_M_begin());
1243 _M_before_begin._M_nxt = nullptr;
1244 _M_deallocate_buckets();
1245 _M_buckets = nullptr;
1246 std::__alloc_on_copy(__this_alloc, __that_alloc);
1247 __hashtable_base::operator=(__ht);
1248 _M_bucket_count = __ht._M_bucket_count;
1249 _M_element_count = __ht._M_element_count;
1250 _M_rehash_policy = __ht._M_rehash_policy;
1251 __alloc_node_gen_t __alloc_node_gen(*this);
1252 __try
1253 {
1254 _M_assign(__ht, __alloc_node_gen);
1255 }
1256 __catch(...)
1257 {
1258 // _M_assign took care of deallocating all memory. Now we
1259 // must make sure this instance remains in a usable state.
1260 _M_reset();
1261 __throw_exception_again;
1262 }
1263 return *this;
1264 }
1265 std::__alloc_on_copy(__this_alloc, __that_alloc);
1266 }
1267
1268 // Reuse allocated buckets and nodes.
1269 _M_assign_elements(__ht);
1270 return *this;
1271 }
1272
1273 template<typename _Key, typename _Value, typename _Alloc,
1274 typename _ExtractKey, typename _Equal,
1275 typename _Hash, typename _RangeHash, typename _Unused,
1276 typename _RehashPolicy, typename _Traits>
1277 template<typename _Ht>
1278 void
1279 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1280 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1281 _M_assign_elements(_Ht&& __ht)
1282 {
1283 __buckets_ptr __former_buckets = nullptr;
1284 std::size_t __former_bucket_count = _M_bucket_count;
1285 const __rehash_state& __former_state = _M_rehash_policy._M_state();
1286
1287 if (_M_bucket_count != __ht._M_bucket_count)
1288 {
1289 __former_buckets = _M_buckets;
1290 _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
1291 _M_bucket_count = __ht._M_bucket_count;
1292 }
1293 else
1294 __builtin_memset(_M_buckets, 0,
1295 _M_bucket_count * sizeof(__node_base_ptr));
1296
1297 __try
1298 {
1299 __hashtable_base::operator=(std::forward<_Ht>(__ht));
1300 _M_element_count = __ht._M_element_count;
1301 _M_rehash_policy = __ht._M_rehash_policy;
1302 __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
1303 _M_before_begin._M_nxt = nullptr;
1304 _M_assign(std::forward<_Ht>(__ht), __roan);
1305 if (__former_buckets)
1306 _M_deallocate_buckets(__former_buckets, __former_bucket_count);
1307 }
1308 __catch(...)
1309 {
1310 if (__former_buckets)
1311 {
1312 // Restore previous buckets.
1313 _M_deallocate_buckets();
1314 _M_rehash_policy._M_reset(__former_state);
1315 _M_buckets = __former_buckets;
1316 _M_bucket_count = __former_bucket_count;
1317 }
1318 __builtin_memset(_M_buckets, 0,
1319 _M_bucket_count * sizeof(__node_base_ptr));
1320 __throw_exception_again;
1321 }
1322 }
1323
1324 template<typename _Key, typename _Value, typename _Alloc,
1325 typename _ExtractKey, typename _Equal,
1326 typename _Hash, typename _RangeHash, typename _Unused,
1327 typename _RehashPolicy, typename _Traits>
1328 template<typename _Ht, typename _NodeGenerator>
1329 void
1330 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1331 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1332 _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
1333 {
1334 __buckets_ptr __buckets = nullptr;
1335 if (!_M_buckets)
1336 _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);
1337
1338 __try
1339 {
1340 if (!__ht._M_before_begin._M_nxt)
1341 return;
1342
1343 // First deal with the special first node pointed to by
1344 // _M_before_begin.
1345 __node_ptr __ht_n = __ht._M_begin();
1346 __node_ptr __this_n
1347 = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
1348 this->_M_copy_code(*__this_n, *__ht_n);
1349 _M_update_bbegin(__this_n);
1350
1351 // Then deal with other nodes.
1352 __node_ptr __prev_n = __this_n;
1353 for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
1354 {
1355 __this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
1356 __prev_n->_M_nxt = __this_n;
1357 this->_M_copy_code(*__this_n, *__ht_n);
1358 size_type __bkt = _M_bucket_index(*__this_n);
1359 if (!_M_buckets[__bkt])
1360 _M_buckets[__bkt] = __prev_n;
1361 __prev_n = __this_n;
1362 }
1363 }
1364 __catch(...)
1365 {
1366 clear();
1367 if (__buckets)
1368 _M_deallocate_buckets();
1369 __throw_exception_again;
1370 }
1371 }
1372
1373 template<typename _Key, typename _Value, typename _Alloc,
1374 typename _ExtractKey, typename _Equal,
1375 typename _Hash, typename _RangeHash, typename _Unused,
1376 typename _RehashPolicy, typename _Traits>
1377 void
1378 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1379 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1380 _M_reset() noexcept
1381 {
1382 _M_rehash_policy._M_reset();
1383 _M_bucket_count = 1;
1384 _M_single_bucket = nullptr;
1385 _M_buckets = &_M_single_bucket;
1386 _M_before_begin._M_nxt = nullptr;
1387 _M_element_count = 0;
1388 }
1389
1390 template<typename _Key, typename _Value, typename _Alloc,
1391 typename _ExtractKey, typename _Equal,
1392 typename _Hash, typename _RangeHash, typename _Unused,
1393 typename _RehashPolicy, typename _Traits>
1394 void
1395 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1396 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1397 _M_move_assign(_Hashtable&& __ht, true_type)
1398 {
1399 if (__builtin_expect(std::__addressof(__ht) == this, false))
1400 return;
1401
1402 this->_M_deallocate_nodes(_M_begin());
1403 _M_deallocate_buckets();
1404 __hashtable_base::operator=(std::move(__ht));
1405 _M_rehash_policy = __ht._M_rehash_policy;
1406 if (!__ht._M_uses_single_bucket())
1407 _M_buckets = __ht._M_buckets;
1408 else
1409 {
1410 _M_buckets = &_M_single_bucket;
1411 _M_single_bucket = __ht._M_single_bucket;
1412 }
1413
1414 _M_bucket_count = __ht._M_bucket_count;
1415 _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
1416 _M_element_count = __ht._M_element_count;
1417 std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());
1418
1419 // Fix bucket containing the _M_before_begin pointer that can't be moved.
1420 _M_update_bbegin();
1421 __ht._M_reset();
1422 }
1423
1424 template<typename _Key, typename _Value, typename _Alloc,
1425 typename _ExtractKey, typename _Equal,
1426 typename _Hash, typename _RangeHash, typename _Unused,
1427 typename _RehashPolicy, typename _Traits>
1428 void
1429 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1430 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1431 _M_move_assign(_Hashtable&& __ht, false_type)
1432 {
1433 if (__ht._M_node_allocator() == this->_M_node_allocator())
1434 _M_move_assign(std::move(__ht), true_type{});
1435 else
1436 {
1437 // Can't move memory, move elements then.
1438 _M_assign_elements(std::move(__ht));
1439 __ht.clear();
1440 }
1441 }
1442
1443 template<typename _Key, typename _Value, typename _Alloc,
1444 typename _ExtractKey, typename _Equal,
1445 typename _Hash, typename _RangeHash, typename _Unused,
1446 typename _RehashPolicy, typename _Traits>
1447 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1448 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1449 _Hashtable(const _Hashtable& __ht)
1450 : __hashtable_base(__ht),
1451 __map_base(__ht),
1452 __rehash_base(__ht),
1453 __hashtable_alloc(
1454 __node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
1455 __enable_default_ctor(__ht),
1456 _M_buckets(nullptr),
1457 _M_bucket_count(__ht._M_bucket_count),
1458 _M_element_count(__ht._M_element_count),
1459 _M_rehash_policy(__ht._M_rehash_policy)
1460 {
1461 __alloc_node_gen_t __alloc_node_gen(*this);
1462 _M_assign(__ht, __alloc_node_gen);
1463 }
1464
1465 template<typename _Key, typename _Value, typename _Alloc,
1466 typename _ExtractKey, typename _Equal,
1467 typename _Hash, typename _RangeHash, typename _Unused,
1468 typename _RehashPolicy, typename _Traits>
1469 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1470 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1471 _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1472 true_type /* alloc always equal */)
1473 noexcept(_S_nothrow_move())
1474 : __hashtable_base(__ht),
1475 __map_base(__ht),
1476 __rehash_base(__ht),
1477 __hashtable_alloc(std::move(__a)),
1478 __enable_default_ctor(__ht),
1479 _M_buckets(__ht._M_buckets),
1480 _M_bucket_count(__ht._M_bucket_count),
1481 _M_before_begin(__ht._M_before_begin._M_nxt),
1482 _M_element_count(__ht._M_element_count),
1483 _M_rehash_policy(__ht._M_rehash_policy)
1484 {
1485 // Update buckets if __ht is using its single bucket.
1486 if (__ht._M_uses_single_bucket())
1487 {
1488 _M_buckets = &_M_single_bucket;
1489 _M_single_bucket = __ht._M_single_bucket;
1490 }
1491
1492 // Fix bucket containing the _M_before_begin pointer that can't be moved.
1493 _M_update_bbegin();
1494
1495 __ht._M_reset();
1496 }
1497
1498 template<typename _Key, typename _Value, typename _Alloc,
1499 typename _ExtractKey, typename _Equal,
1500 typename _Hash, typename _RangeHash, typename _Unused,
1501 typename _RehashPolicy, typename _Traits>
1502 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1503 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1504 _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
1505 : __hashtable_base(__ht),
1506 __map_base(__ht),
1507 __rehash_base(__ht),
1508 __hashtable_alloc(__node_alloc_type(__a)),
1509 __enable_default_ctor(__ht),
1510 _M_buckets(),
1511 _M_bucket_count(__ht._M_bucket_count),
1512 _M_element_count(__ht._M_element_count),
1513 _M_rehash_policy(__ht._M_rehash_policy)
1514 {
1515 __alloc_node_gen_t __alloc_node_gen(*this);
1516 _M_assign(__ht, __alloc_node_gen);
1517 }
1518
1519 template<typename _Key, typename _Value, typename _Alloc,
1520 typename _ExtractKey, typename _Equal,
1521 typename _Hash, typename _RangeHash, typename _Unused,
1522 typename _RehashPolicy, typename _Traits>
1523 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1524 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1525 _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1526 false_type /* alloc always equal */)
1527 : __hashtable_base(__ht),
1528 __map_base(__ht),
1529 __rehash_base(__ht),
1530 __hashtable_alloc(std::move(__a)),
1531 __enable_default_ctor(__ht),
1532 _M_buckets(nullptr),
1533 _M_bucket_count(__ht._M_bucket_count),
1534 _M_element_count(__ht._M_element_count),
1535 _M_rehash_policy(__ht._M_rehash_policy)
1536 {
1537 if (__ht._M_node_allocator() == this->_M_node_allocator())
1538 {
1539 if (__ht._M_uses_single_bucket())
1540 {
1541 _M_buckets = &_M_single_bucket;
1542 _M_single_bucket = __ht._M_single_bucket;
1543 }
1544 else
1545 _M_buckets = __ht._M_buckets;
1546
1547 // Fix bucket containing the _M_before_begin pointer that can't be
1548 // moved.
1549 _M_update_bbegin(__ht._M_begin());
1550
1551 __ht._M_reset();
1552 }
1553 else
1554 {
1555 __alloc_node_gen_t __alloc_gen(*this);
1556
1557 using _Fwd_Ht = __conditional_t<
1558 __move_if_noexcept_cond<value_type>::value,
1559 const _Hashtable&, _Hashtable&&>;
1560 _M_assign(std::forward<_Fwd_Ht>(__ht), __alloc_gen);
1561 __ht.clear();
1562 }
1563 }
1564
1565 template<typename _Key, typename _Value, typename _Alloc,
1566 typename _ExtractKey, typename _Equal,
1567 typename _Hash, typename _RangeHash, typename _Unused,
1568 typename _RehashPolicy, typename _Traits>
1569 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1570 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1571 ~_Hashtable() noexcept
1572 {
1573 // Getting a bucket index from a node shall not throw because it is used
1574 // in methods (erase, swap...) that shall not throw. Need a complete
1575 // type to check this, so do it in the destructor not at class scope.
1576 static_assert(noexcept(declval<const __hash_code_base_access&>()
1577 ._M_bucket_index(declval<const __node_value_type&>(),
1578 (std::size_t)0)),
1579 "Cache the hash code or qualify your functors involved"
1580 " in hash code and bucket index computation with noexcept");
1581
1582 clear();
1583 _M_deallocate_buckets();
1584 }
1585
1586 template<typename _Key, typename _Value, typename _Alloc,
1587 typename _ExtractKey, typename _Equal,
1588 typename _Hash, typename _RangeHash, typename _Unused,
1589 typename _RehashPolicy, typename _Traits>
1590 void
1591 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1592 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1593 swap(_Hashtable& __x)
1594 noexcept(__and_<__is_nothrow_swappable<_Hash>,
1595 __is_nothrow_swappable<_Equal>>::value)
1596 {
1597 // The only base class with member variables is hash_code_base.
1598 // We define _Hash_code_base::_M_swap because different
1599 // specializations have different members.
1600 this->_M_swap(__x);
1601
1602 std::__alloc_on_swap(this->_M_node_allocator(), __x._M_node_allocator());
1603 std::swap(_M_rehash_policy, __x._M_rehash_policy);
1604
1605 // Deal properly with potentially moved instances.
1606 if (this->_M_uses_single_bucket())
1607 {
1608 if (!__x._M_uses_single_bucket())
1609 {
1610 _M_buckets = __x._M_buckets;
1611 __x._M_buckets = &__x._M_single_bucket;
1612 }
1613 }
1614 else if (__x._M_uses_single_bucket())
1615 {
1616 __x._M_buckets = _M_buckets;
1617 _M_buckets = &_M_single_bucket;
1618 }
1619 else
1620 std::swap(_M_buckets, __x._M_buckets);
1621
1622 std::swap(_M_bucket_count, __x._M_bucket_count);
1623 std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
1624 std::swap(_M_element_count, __x._M_element_count);
1625 std::swap(_M_single_bucket, __x._M_single_bucket);
1626
1627 // Fix buckets containing the _M_before_begin pointers that can't be
1628 // swapped.
1629 _M_update_bbegin();
1630 __x._M_update_bbegin();
1631 }
1632
1633 template<typename _Key, typename _Value, typename _Alloc,
1634 typename _ExtractKey, typename _Equal,
1635 typename _Hash, typename _RangeHash, typename _Unused,
1636 typename _RehashPolicy, typename _Traits>
1637 auto
1638 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1639 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1640 find(const key_type& __k)
1641 -> iterator
1642 {
1643 if (size() <= __small_size_threshold())
1644 {
1645 for (auto __it = begin(); __it != end(); ++__it)
1646 if (this->_M_key_equals(__k, *__it._M_cur))
1647 return __it;
1648 return end();
1649 }
1650
1651 __hash_code __code = this->_M_hash_code(__k);
1652 std::size_t __bkt = _M_bucket_index(__code);
1653 return iterator(_M_find_node(__bkt, __k, __code));
1654 }
1655
1656 template<typename _Key, typename _Value, typename _Alloc,
1657 typename _ExtractKey, typename _Equal,
1658 typename _Hash, typename _RangeHash, typename _Unused,
1659 typename _RehashPolicy, typename _Traits>
1660 auto
1661 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1662 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1663 find(const key_type& __k) const
1664 -> const_iterator
1665 {
1666 if (size() <= __small_size_threshold())
1667 {
1668 for (auto __it = begin(); __it != end(); ++__it)
1669 if (this->_M_key_equals(__k, *__it._M_cur))
1670 return __it;
1671 return end();
1672 }
1673
1674 __hash_code __code = this->_M_hash_code(__k);
1675 std::size_t __bkt = _M_bucket_index(__code);
1676 return const_iterator(_M_find_node(__bkt, __k, __code));
1677 }
1678
1679#if __cplusplus > 201703L
1680 template<typename _Key, typename _Value, typename _Alloc,
1681 typename _ExtractKey, typename _Equal,
1682 typename _Hash, typename _RangeHash, typename _Unused,
1683 typename _RehashPolicy, typename _Traits>
1684 template<typename _Kt, typename, typename>
1685 auto
1686 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1687 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1688 _M_find_tr(const _Kt& __k)
1689 -> iterator
1690 {
1691 __hash_code __code = this->_M_hash_code_tr(__k);
1692 std::size_t __bkt = _M_bucket_index(__code);
1693 return iterator(_M_find_node_tr(__bkt, __k, __code));
1694 }
1695
1696 template<typename _Key, typename _Value, typename _Alloc,
1697 typename _ExtractKey, typename _Equal,
1698 typename _Hash, typename _RangeHash, typename _Unused,
1699 typename _RehashPolicy, typename _Traits>
1700 template<typename _Kt, typename, typename>
1701 auto
1702 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1703 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1704 _M_find_tr(const _Kt& __k) const
1705 -> const_iterator
1706 {
1707 __hash_code __code = this->_M_hash_code_tr(__k);
1708 std::size_t __bkt = _M_bucket_index(__code);
1709 return const_iterator(_M_find_node_tr(__bkt, __k, __code));
1710 }
1711#endif
1712
1713 template<typename _Key, typename _Value, typename _Alloc,
1714 typename _ExtractKey, typename _Equal,
1715 typename _Hash, typename _RangeHash, typename _Unused,
1716 typename _RehashPolicy, typename _Traits>
1717 auto
1718 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1719 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1720 count(const key_type& __k) const
1721 -> size_type
1722 {
1723 auto __it = find(__k);
1724 if (!__it._M_cur)
1725 return 0;
1726
1727 if (__unique_keys::value)
1728 return 1;
1729
1730 // All equivalent values are next to each other, if we find a
1731 // non-equivalent value after an equivalent one it means that we won't
1732 // find any new equivalent value.
1733 size_type __result = 1;
1734 for (auto __ref = __it++;
1735 __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
1736 ++__it)
1737 ++__result;
1738
1739 return __result;
1740 }
1741
1742#if __cplusplus > 201703L
1743 template<typename _Key, typename _Value, typename _Alloc,
1744 typename _ExtractKey, typename _Equal,
1745 typename _Hash, typename _RangeHash, typename _Unused,
1746 typename _RehashPolicy, typename _Traits>
1747 template<typename _Kt, typename, typename>
1748 auto
1749 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1750 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1751 _M_count_tr(const _Kt& __k) const
1752 -> size_type
1753 {
1754 __hash_code __code = this->_M_hash_code_tr(__k);
1755 std::size_t __bkt = _M_bucket_index(__code);
1756 auto __n = _M_find_node_tr(__bkt, __k, __code);
1757 if (!__n)
1758 return 0;
1759
1760 // All equivalent values are next to each other, if we find a
1761 // non-equivalent value after an equivalent one it means that we won't
1762 // find any new equivalent value.
1763 iterator __it(__n);
1764 size_type __result = 1;
1765 for (++__it;
1766 __it._M_cur && this->_M_equals_tr(__k, __code, *__it._M_cur);
1767 ++__it)
1768 ++__result;
1769
1770 return __result;
1771 }
1772#endif
1773
1774 template<typename _Key, typename _Value, typename _Alloc,
1775 typename _ExtractKey, typename _Equal,
1776 typename _Hash, typename _RangeHash, typename _Unused,
1777 typename _RehashPolicy, typename _Traits>
1778 auto
1779 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1780 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1781 equal_range(const key_type& __k)
1782 -> pair<iterator, iterator>
1783 {
1784 auto __ite = find(__k);
1785 if (!__ite._M_cur)
1786 return { __ite, __ite };
1787
1788 auto __beg = __ite++;
1789 if (__unique_keys::value)
1790 return { __beg, __ite };
1791
1792 // All equivalent values are next to each other, if we find a
1793 // non-equivalent value after an equivalent one it means that we won't
1794 // find any new equivalent value.
1795 while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1796 ++__ite;
1797
1798 return { __beg, __ite };
1799 }
1800
1801 template<typename _Key, typename _Value, typename _Alloc,
1802 typename _ExtractKey, typename _Equal,
1803 typename _Hash, typename _RangeHash, typename _Unused,
1804 typename _RehashPolicy, typename _Traits>
1805 auto
1806 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1807 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1808 equal_range(const key_type& __k) const
1809 -> pair<const_iterator, const_iterator>
1810 {
1811 auto __ite = find(__k);
1812 if (!__ite._M_cur)
1813 return { __ite, __ite };
1814
1815 auto __beg = __ite++;
1816 if (__unique_keys::value)
1817 return { __beg, __ite };
1818
1819 // All equivalent values are next to each other, if we find a
1820 // non-equivalent value after an equivalent one it means that we won't
1821 // find any new equivalent value.
1822 while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1823 ++__ite;
1824
1825 return { __beg, __ite };
1826 }
1827
1828#if __cplusplus > 201703L
1829 template<typename _Key, typename _Value, typename _Alloc,
1830 typename _ExtractKey, typename _Equal,
1831 typename _Hash, typename _RangeHash, typename _Unused,
1832 typename _RehashPolicy, typename _Traits>
1833 template<typename _Kt, typename, typename>
1834 auto
1835 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1836 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1837 _M_equal_range_tr(const _Kt& __k)
1838 -> pair<iterator, iterator>
1839 {
1840 __hash_code __code = this->_M_hash_code_tr(__k);
1841 std::size_t __bkt = _M_bucket_index(__code);
1842 auto __n = _M_find_node_tr(__bkt, __k, __code);
1843 iterator __ite(__n);
1844 if (!__n)
1845 return { __ite, __ite };
1846
1847 // All equivalent values are next to each other, if we find a
1848 // non-equivalent value after an equivalent one it means that we won't
1849 // find any new equivalent value.
1850 auto __beg = __ite++;
1851 while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
1852 ++__ite;
1853
1854 return { __beg, __ite };
1855 }
1856
1857 template<typename _Key, typename _Value, typename _Alloc,
1858 typename _ExtractKey, typename _Equal,
1859 typename _Hash, typename _RangeHash, typename _Unused,
1860 typename _RehashPolicy, typename _Traits>
1861 template<typename _Kt, typename, typename>
1862 auto
1863 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1864 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1865 _M_equal_range_tr(const _Kt& __k) const
1866 -> pair<const_iterator, const_iterator>
1867 {
1868 __hash_code __code = this->_M_hash_code_tr(__k);
1869 std::size_t __bkt = _M_bucket_index(__code);
1870 auto __n = _M_find_node_tr(__bkt, __k, __code);
1871 const_iterator __ite(__n);
1872 if (!__n)
1873 return { __ite, __ite };
1874
1875 // All equivalent values are next to each other, if we find a
1876 // non-equivalent value after an equivalent one it means that we won't
1877 // find any new equivalent value.
1878 auto __beg = __ite++;
1879 while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
1880 ++__ite;
1881
1882 return { __beg, __ite };
1883 }
1884#endif
1885
1886 // Find the node before the one whose key compares equal to k.
1887 // Return nullptr if no node is found.
1888 template<typename _Key, typename _Value, typename _Alloc,
1889 typename _ExtractKey, typename _Equal,
1890 typename _Hash, typename _RangeHash, typename _Unused,
1891 typename _RehashPolicy, typename _Traits>
1892 auto
1893 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1894 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1895 _M_find_before_node(const key_type& __k)
1896 -> __node_base_ptr
1897 {
1898 __node_base_ptr __prev_p = &_M_before_begin;
1899 if (!__prev_p->_M_nxt)
1900 return nullptr;
1901
1902 for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);
1903 __p != nullptr;
1904 __p = __p->_M_next())
1905 {
1906 if (this->_M_key_equals(__k, *__p))
1907 return __prev_p;
1908
1909 __prev_p = __p;
1910 }
1911
1912 return nullptr;
1913 }
1914
1915 // Find the node before the one whose key compares equal to k in the bucket
1916 // bkt. Return nullptr if no node is found.
1917 template<typename _Key, typename _Value, typename _Alloc,
1918 typename _ExtractKey, typename _Equal,
1919 typename _Hash, typename _RangeHash, typename _Unused,
1920 typename _RehashPolicy, typename _Traits>
1921 auto
1922 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1923 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1924 _M_find_before_node(size_type __bkt, const key_type& __k,
1925 __hash_code __code) const
1926 -> __node_base_ptr
1927 {
1928 __node_base_ptr __prev_p = _M_buckets[__bkt];
1929 if (!__prev_p)
1930 return nullptr;
1931
1932 for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
1933 __p = __p->_M_next())
1934 {
1935 if (this->_M_equals(__k, __code, *__p))
1936 return __prev_p;
1937
1938 if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
1939 break;
1940 __prev_p = __p;
1941 }
1942
1943 return nullptr;
1944 }
1945
1946 template<typename _Key, typename _Value, typename _Alloc,
1947 typename _ExtractKey, typename _Equal,
1948 typename _Hash, typename _RangeHash, typename _Unused,
1949 typename _RehashPolicy, typename _Traits>
1950 template<typename _Kt>
1951 auto
1952 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1953 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1954 _M_find_before_node_tr(size_type __bkt, const _Kt& __k,
1955 __hash_code __code) const
1956 -> __node_base_ptr
1957 {
1958 __node_base_ptr __prev_p = _M_buckets[__bkt];
1959 if (!__prev_p)
1960 return nullptr;
1961
1962 for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
1963 __p = __p->_M_next())
1964 {
1965 if (this->_M_equals_tr(__k, __code, *__p))
1966 return __prev_p;
1967
1968 if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
1969 break;
1970 __prev_p = __p;
1971 }
1972
1973 return nullptr;
1974 }
1975
1976 template<typename _Key, typename _Value, typename _Alloc,
1977 typename _ExtractKey, typename _Equal,
1978 typename _Hash, typename _RangeHash, typename _Unused,
1979 typename _RehashPolicy, typename _Traits>
1980 void
1981 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1982 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1983 _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
1984 {
1985 if (_M_buckets[__bkt])
1986 {
1987 // Bucket is not empty, we just need to insert the new node
1988 // after the bucket before begin.
1989 __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
1990 _M_buckets[__bkt]->_M_nxt = __node;
1991 }
1992 else
1993 {
1994 // The bucket is empty, the new node is inserted at the
1995 // beginning of the singly-linked list and the bucket will
1996 // contain _M_before_begin pointer.
1997 __node->_M_nxt = _M_before_begin._M_nxt;
1998 _M_before_begin._M_nxt = __node;
1999
2000 if (__node->_M_nxt)
2001 // We must update former begin bucket that is pointing to
2002 // _M_before_begin.
2003 _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;
2004
2005 _M_buckets[__bkt] = &_M_before_begin;
2006 }
2007 }
2008
2009 template<typename _Key, typename _Value, typename _Alloc,
2010 typename _ExtractKey, typename _Equal,
2011 typename _Hash, typename _RangeHash, typename _Unused,
2012 typename _RehashPolicy, typename _Traits>
2013 void
2014 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2015 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2016 _M_remove_bucket_begin(size_type __bkt, __node_ptr __next,
2017 size_type __next_bkt)
2018 {
2019 if (!__next || __next_bkt != __bkt)
2020 {
2021 // Bucket is now empty
2022 // First update next bucket if any
2023 if (__next)
2024 _M_buckets[__next_bkt] = _M_buckets[__bkt];
2025
2026 // Second update before begin node if necessary
2027 if (&_M_before_begin == _M_buckets[__bkt])
2028 _M_before_begin._M_nxt = __next;
2029 _M_buckets[__bkt] = nullptr;
2030 }
2031 }
2032
2033 template<typename _Key, typename _Value, typename _Alloc,
2034 typename _ExtractKey, typename _Equal,
2035 typename _Hash, typename _RangeHash, typename _Unused,
2036 typename _RehashPolicy, typename _Traits>
2037 auto
2038 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2039 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2040 _M_get_previous_node(size_type __bkt, __node_ptr __n)
2041 -> __node_base_ptr
2042 {
2043 __node_base_ptr __prev_n = _M_buckets[__bkt];
2044 while (__prev_n->_M_nxt != __n)
2045 __prev_n = __prev_n->_M_nxt;
2046 return __prev_n;
2047 }
2048
2049 template<typename _Key, typename _Value, typename _Alloc,
2050 typename _ExtractKey, typename _Equal,
2051 typename _Hash, typename _RangeHash, typename _Unused,
2052 typename _RehashPolicy, typename _Traits>
2053 template<typename... _Args>
2054 auto
2055 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2056 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2057 _M_emplace(true_type /* __uks */, _Args&&... __args)
2058 -> pair<iterator, bool>
2059 {
2060 // First build the node to get access to the hash code
2061 _Scoped_node __node { this, std::forward<_Args>(__args)... };
2062 const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
2063 if (size() <= __small_size_threshold())
2064 {
2065 for (auto __it = begin(); __it != end(); ++__it)
2066 if (this->_M_key_equals(__k, *__it._M_cur))
2067 // There is already an equivalent node, no insertion
2068 return { __it, false };
2069 }
2070
2071 __hash_code __code = this->_M_hash_code(__k);
2072 size_type __bkt = _M_bucket_index(__code);
2073 if (size() > __small_size_threshold())
2074 if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
2075 // There is already an equivalent node, no insertion
2076 return { iterator(__p), false };
2077
2078 // Insert the node
2079 auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
2080 __node._M_node = nullptr;
2081 return { __pos, true };
2082 }
2083
2084 template<typename _Key, typename _Value, typename _Alloc,
2085 typename _ExtractKey, typename _Equal,
2086 typename _Hash, typename _RangeHash, typename _Unused,
2087 typename _RehashPolicy, typename _Traits>
2088 template<typename... _Args>
2089 auto
2090 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2091 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2092 _M_emplace(const_iterator __hint, false_type /* __uks */,
2093 _Args&&... __args)
2094 -> iterator
2095 {
2096 // First build the node to get its hash code.
2097 _Scoped_node __node { this, std::forward<_Args>(__args)... };
2098 const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
2099
2100 auto __res = this->_M_compute_hash_code(__hint, __k);
2101 auto __pos
2102 = _M_insert_multi_node(__res.first._M_cur, __res.second,
2103 __node._M_node);
2104 __node._M_node = nullptr;
2105 return __pos;
2106 }
2107
2108 template<typename _Key, typename _Value, typename _Alloc,
2109 typename _ExtractKey, typename _Equal,
2110 typename _Hash, typename _RangeHash, typename _Unused,
2111 typename _RehashPolicy, typename _Traits>
2112 auto
2113 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2114 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2115 _M_compute_hash_code(const_iterator __hint, const key_type& __k) const
2116 -> pair<const_iterator, __hash_code>
2117 {
2118 if (size() <= __small_size_threshold())
2119 {
2120 if (__hint != cend())
2121 {
2122 for (auto __it = __hint; __it != cend(); ++__it)
2123 if (this->_M_key_equals(__k, *__it._M_cur))
2124 return { __it, this->_M_hash_code(*__it._M_cur) };
2125 }
2126
2127 for (auto __it = cbegin(); __it != __hint; ++__it)
2128 if (this->_M_key_equals(__k, *__it._M_cur))
2129 return { __it, this->_M_hash_code(*__it._M_cur) };
2130 }
2131
2132 return { __hint, this->_M_hash_code(__k) };
2133 }
2134
2135 template<typename _Key, typename _Value, typename _Alloc,
2136 typename _ExtractKey, typename _Equal,
2137 typename _Hash, typename _RangeHash, typename _Unused,
2138 typename _RehashPolicy, typename _Traits>
2139 auto
2140 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2141 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2142 _M_insert_unique_node(size_type __bkt, __hash_code __code,
2143 __node_ptr __node, size_type __n_elt)
2144 -> iterator
2145 {
2146 const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2148 = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
2149 __n_elt);
2150
2151 if (__do_rehash.first)
2152 {
2153 _M_rehash(__do_rehash.second, __saved_state);
2154 __bkt = _M_bucket_index(__code);
2155 }
2156
2157 this->_M_store_code(*__node, __code);
2158
2159 // Always insert at the beginning of the bucket.
2160 _M_insert_bucket_begin(__bkt, __node);
2161 ++_M_element_count;
2162 return iterator(__node);
2163 }
2164
2165 template<typename _Key, typename _Value, typename _Alloc,
2166 typename _ExtractKey, typename _Equal,
2167 typename _Hash, typename _RangeHash, typename _Unused,
2168 typename _RehashPolicy, typename _Traits>
2169 auto
2170 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2171 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2172 _M_insert_multi_node(__node_ptr __hint,
2173 __hash_code __code, __node_ptr __node)
2174 -> iterator
2175 {
2176 const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2178 = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);
2179
2180 if (__do_rehash.first)
2181 _M_rehash(__do_rehash.second, __saved_state);
2182
2183 this->_M_store_code(*__node, __code);
2184 const key_type& __k = _ExtractKey{}(__node->_M_v());
2185 size_type __bkt = _M_bucket_index(__code);
2186
2187 // Find the node before an equivalent one or use hint if it exists and
2188 // if it is equivalent.
2189 __node_base_ptr __prev
2190 = __builtin_expect(__hint != nullptr, false)
2191 && this->_M_equals(__k, __code, *__hint)
2192 ? __hint
2193 : _M_find_before_node(__bkt, __k, __code);
2194
2195 if (__prev)
2196 {
2197 // Insert after the node before the equivalent one.
2198 __node->_M_nxt = __prev->_M_nxt;
2199 __prev->_M_nxt = __node;
2200 if (__builtin_expect(__prev == __hint, false))
2201 // hint might be the last bucket node, in this case we need to
2202 // update next bucket.
2203 if (__node->_M_nxt
2204 && !this->_M_equals(__k, __code, *__node->_M_next()))
2205 {
2206 size_type __next_bkt = _M_bucket_index(*__node->_M_next());
2207 if (__next_bkt != __bkt)
2208 _M_buckets[__next_bkt] = __node;
2209 }
2210 }
2211 else
2212 // The inserted node has no equivalent in the hashtable. We must
2213 // insert the new node at the beginning of the bucket to preserve
2214 // equivalent elements' relative positions.
2215 _M_insert_bucket_begin(__bkt, __node);
2216 ++_M_element_count;
2217 return iterator(__node);
2218 }
2219
2220 // Insert v if no element with its key is already present.
2221 template<typename _Key, typename _Value, typename _Alloc,
2222 typename _ExtractKey, typename _Equal,
2223 typename _Hash, typename _RangeHash, typename _Unused,
2224 typename _RehashPolicy, typename _Traits>
2225 template<typename _Kt, typename _Arg, typename _NodeGenerator>
2226 auto
2227 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2228 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2229 _M_insert_unique(_Kt&& __k, _Arg&& __v,
2230 const _NodeGenerator& __node_gen)
2231 -> pair<iterator, bool>
2232 {
2233 if (size() <= __small_size_threshold())
2234 for (auto __it = begin(); __it != end(); ++__it)
2235 if (this->_M_key_equals_tr(__k, *__it._M_cur))
2236 return { __it, false };
2237
2238 __hash_code __code = this->_M_hash_code_tr(__k);
2239 size_type __bkt = _M_bucket_index(__code);
2240
2241 if (size() > __small_size_threshold())
2242 if (__node_ptr __node = _M_find_node_tr(__bkt, __k, __code))
2243 return { iterator(__node), false };
2244
2245 _Scoped_node __node {
2246 __node_builder_t::_S_build(std::forward<_Kt>(__k),
2247 std::forward<_Arg>(__v),
2248 __node_gen),
2249 this
2250 };
2251 auto __pos
2252 = _M_insert_unique_node(__bkt, __code, __node._M_node);
2253 __node._M_node = nullptr;
2254 return { __pos, true };
2255 }
2256
2257 // Insert v unconditionally.
2258 template<typename _Key, typename _Value, typename _Alloc,
2259 typename _ExtractKey, typename _Equal,
2260 typename _Hash, typename _RangeHash, typename _Unused,
2261 typename _RehashPolicy, typename _Traits>
2262 template<typename _Arg, typename _NodeGenerator>
2263 auto
2264 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2265 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2266 _M_insert(const_iterator __hint, _Arg&& __v,
2267 const _NodeGenerator& __node_gen,
2268 false_type /* __uks */)
2269 -> iterator
2270 {
2271 // First allocate new node so that we don't do anything if it throws.
2272 _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
2273
2274 // Second compute the hash code so that we don't rehash if it throws.
2275 auto __res = this->_M_compute_hash_code(
2276 __hint, _ExtractKey{}(__node._M_node->_M_v()));
2277
2278 auto __pos
2279 = _M_insert_multi_node(__res.first._M_cur, __res.second,
2280 __node._M_node);
2281 __node._M_node = nullptr;
2282 return __pos;
2283 }
2284
2285 template<typename _Key, typename _Value, typename _Alloc,
2286 typename _ExtractKey, typename _Equal,
2287 typename _Hash, typename _RangeHash, typename _Unused,
2288 typename _RehashPolicy, typename _Traits>
2289 auto
2290 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2291 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2292 erase(const_iterator __it)
2293 -> iterator
2294 {
2295 __node_ptr __n = __it._M_cur;
2296 std::size_t __bkt = _M_bucket_index(*__n);
2297
2298 // Look for previous node to unlink it from the erased one, this
2299 // is why we need buckets to contain the before begin to make
2300 // this search fast.
2301 __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2302 return _M_erase(__bkt, __prev_n, __n);
2303 }
2304
2305 template<typename _Key, typename _Value, typename _Alloc,
2306 typename _ExtractKey, typename _Equal,
2307 typename _Hash, typename _RangeHash, typename _Unused,
2308 typename _RehashPolicy, typename _Traits>
2309 auto
2310 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2311 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2312 _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
2313 -> iterator
2314 {
2315 if (__prev_n == _M_buckets[__bkt])
2316 _M_remove_bucket_begin(__bkt, __n->_M_next(),
2317 __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
2318 else if (__n->_M_nxt)
2319 {
2320 size_type __next_bkt = _M_bucket_index(*__n->_M_next());
2321 if (__next_bkt != __bkt)
2322 _M_buckets[__next_bkt] = __prev_n;
2323 }
2324
2325 __prev_n->_M_nxt = __n->_M_nxt;
2326 iterator __result(__n->_M_next());
2327 this->_M_deallocate_node(__n);
2328 --_M_element_count;
2329
2330 return __result;
2331 }
2332
2333 template<typename _Key, typename _Value, typename _Alloc,
2334 typename _ExtractKey, typename _Equal,
2335 typename _Hash, typename _RangeHash, typename _Unused,
2336 typename _RehashPolicy, typename _Traits>
2337 auto
2338 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2339 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2340 _M_erase(true_type /* __uks */, const key_type& __k)
2341 -> size_type
2342 {
2343 __node_base_ptr __prev_n;
2344 __node_ptr __n;
2345 std::size_t __bkt;
2346 if (size() <= __small_size_threshold())
2347 {
2348 __prev_n = _M_find_before_node(__k);
2349 if (!__prev_n)
2350 return 0;
2351
2352 // We found a matching node, erase it.
2353 __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2354 __bkt = _M_bucket_index(*__n);
2355 }
2356 else
2357 {
2358 __hash_code __code = this->_M_hash_code(__k);
2359 __bkt = _M_bucket_index(__code);
2360
2361 // Look for the node before the first matching node.
2362 __prev_n = _M_find_before_node(__bkt, __k, __code);
2363 if (!__prev_n)
2364 return 0;
2365
2366 // We found a matching node, erase it.
2367 __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2368 }
2369
2370 _M_erase(__bkt, __prev_n, __n);
2371 return 1;
2372 }
2373
2374 template<typename _Key, typename _Value, typename _Alloc,
2375 typename _ExtractKey, typename _Equal,
2376 typename _Hash, typename _RangeHash, typename _Unused,
2377 typename _RehashPolicy, typename _Traits>
2378 auto
2379 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2380 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2381 _M_erase(false_type /* __uks */, const key_type& __k)
2382 -> size_type
2383 {
2384 std::size_t __bkt;
2385 __node_base_ptr __prev_n;
2386 __node_ptr __n;
2387 if (size() <= __small_size_threshold())
2388 {
2389 __prev_n = _M_find_before_node(__k);
2390 if (!__prev_n)
2391 return 0;
2392
2393 // We found a matching node, erase it.
2394 __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2395 __bkt = _M_bucket_index(*__n);
2396 }
2397 else
2398 {
2399 __hash_code __code = this->_M_hash_code(__k);
2400 __bkt = _M_bucket_index(__code);
2401
2402 // Look for the node before the first matching node.
2403 __prev_n = _M_find_before_node(__bkt, __k, __code);
2404 if (!__prev_n)
2405 return 0;
2406
2407 __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2408 }
2409
2410 // _GLIBCXX_RESOLVE_LIB_DEFECTS
2411 // 526. Is it undefined if a function in the standard changes
2412 // in parameters?
2413 // We use one loop to find all matching nodes and another to deallocate
2414 // them so that the key stays valid during the first loop. It might be
2415 // invalidated indirectly when destroying nodes.
2416 __node_ptr __n_last = __n->_M_next();
2417 while (__n_last && this->_M_node_equals(*__n, *__n_last))
2418 __n_last = __n_last->_M_next();
2419
2420 std::size_t __n_last_bkt = __n_last ? _M_bucket_index(*__n_last) : __bkt;
2421
2422 // Deallocate nodes.
2423 size_type __result = 0;
2424 do
2425 {
2426 __node_ptr __p = __n->_M_next();
2427 this->_M_deallocate_node(__n);
2428 __n = __p;
2429 ++__result;
2430 }
2431 while (__n != __n_last);
2432
2433 _M_element_count -= __result;
2434 if (__prev_n == _M_buckets[__bkt])
2435 _M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
2436 else if (__n_last_bkt != __bkt)
2437 _M_buckets[__n_last_bkt] = __prev_n;
2438 __prev_n->_M_nxt = __n_last;
2439 return __result;
2440 }
2441
2442 template<typename _Key, typename _Value, typename _Alloc,
2443 typename _ExtractKey, typename _Equal,
2444 typename _Hash, typename _RangeHash, typename _Unused,
2445 typename _RehashPolicy, typename _Traits>
2446 auto
2447 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2448 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2449 erase(const_iterator __first, const_iterator __last)
2450 -> iterator
2451 {
2452 __node_ptr __n = __first._M_cur;
2453 __node_ptr __last_n = __last._M_cur;
2454 if (__n == __last_n)
2455 return iterator(__n);
2456
2457 std::size_t __bkt = _M_bucket_index(*__n);
2458
2459 __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2460 bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
2461 std::size_t __n_bkt = __bkt;
2462 for (;;)
2463 {
2464 do
2465 {
2466 __node_ptr __tmp = __n;
2467 __n = __n->_M_next();
2468 this->_M_deallocate_node(__tmp);
2469 --_M_element_count;
2470 if (!__n)
2471 break;
2472 __n_bkt = _M_bucket_index(*__n);
2473 }
2474 while (__n != __last_n && __n_bkt == __bkt);
2475 if (__is_bucket_begin)
2476 _M_remove_bucket_begin(__bkt, __n, __n_bkt);
2477 if (__n == __last_n)
2478 break;
2479 __is_bucket_begin = true;
2480 __bkt = __n_bkt;
2481 }
2482
2483 if (__n && (__n_bkt != __bkt || __is_bucket_begin))
2484 _M_buckets[__n_bkt] = __prev_n;
2485 __prev_n->_M_nxt = __n;
2486 return iterator(__n);
2487 }
2488
2489 template<typename _Key, typename _Value, typename _Alloc,
2490 typename _ExtractKey, typename _Equal,
2491 typename _Hash, typename _RangeHash, typename _Unused,
2492 typename _RehashPolicy, typename _Traits>
2493 void
2494 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2495 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2496 clear() noexcept
2497 {
2498 this->_M_deallocate_nodes(_M_begin());
2499 __builtin_memset(_M_buckets, 0,
2500 _M_bucket_count * sizeof(__node_base_ptr));
2501 _M_element_count = 0;
2502 _M_before_begin._M_nxt = nullptr;
2503 }
2504
2505 template<typename _Key, typename _Value, typename _Alloc,
2506 typename _ExtractKey, typename _Equal,
2507 typename _Hash, typename _RangeHash, typename _Unused,
2508 typename _RehashPolicy, typename _Traits>
2509 void
2510 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2511 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2512 rehash(size_type __bkt_count)
2513 {
2514 const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2515 __bkt_count
2516 = std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
2517 __bkt_count);
2518 __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);
2519
2520 if (__bkt_count != _M_bucket_count)
2521 _M_rehash(__bkt_count, __saved_state);
2522 else
2523 // No rehash, restore previous state to keep it consistent with
2524 // container state.
2525 _M_rehash_policy._M_reset(__saved_state);
2526 }
2527
2528 template<typename _Key, typename _Value, typename _Alloc,
2529 typename _ExtractKey, typename _Equal,
2530 typename _Hash, typename _RangeHash, typename _Unused,
2531 typename _RehashPolicy, typename _Traits>
2532 void
2533 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2534 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2535 _M_rehash(size_type __bkt_count, const __rehash_state& __state)
2536 {
2537 __try
2538 {
2539 _M_rehash_aux(__bkt_count, __unique_keys{});
2540 }
2541 __catch(...)
2542 {
2543 // A failure here means that buckets allocation failed. We only
2544 // have to restore hash policy previous state.
2545 _M_rehash_policy._M_reset(__state);
2546 __throw_exception_again;
2547 }
2548 }
2549
2550 // Rehash when there is no equivalent elements.
2551 template<typename _Key, typename _Value, typename _Alloc,
2552 typename _ExtractKey, typename _Equal,
2553 typename _Hash, typename _RangeHash, typename _Unused,
2554 typename _RehashPolicy, typename _Traits>
2555 void
2556 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2557 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2558 _M_rehash_aux(size_type __bkt_count, true_type /* __uks */)
2559 {
2560 __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2561 __node_ptr __p = _M_begin();
2562 _M_before_begin._M_nxt = nullptr;
2563 std::size_t __bbegin_bkt = 0;
2564 while (__p)
2565 {
2566 __node_ptr __next = __p->_M_next();
2567 std::size_t __bkt
2568 = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2569 if (!__new_buckets[__bkt])
2570 {
2571 __p->_M_nxt = _M_before_begin._M_nxt;
2572 _M_before_begin._M_nxt = __p;
2573 __new_buckets[__bkt] = &_M_before_begin;
2574 if (__p->_M_nxt)
2575 __new_buckets[__bbegin_bkt] = __p;
2576 __bbegin_bkt = __bkt;
2577 }
2578 else
2579 {
2580 __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2581 __new_buckets[__bkt]->_M_nxt = __p;
2582 }
2583
2584 __p = __next;
2585 }
2586
2587 _M_deallocate_buckets();
2588 _M_bucket_count = __bkt_count;
2589 _M_buckets = __new_buckets;
2590 }
2591
2592 // Rehash when there can be equivalent elements, preserve their relative
2593 // order.
2594 template<typename _Key, typename _Value, typename _Alloc,
2595 typename _ExtractKey, typename _Equal,
2596 typename _Hash, typename _RangeHash, typename _Unused,
2597 typename _RehashPolicy, typename _Traits>
2598 void
2599 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2600 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2601 _M_rehash_aux(size_type __bkt_count, false_type /* __uks */)
2602 {
2603 __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2604 __node_ptr __p = _M_begin();
2605 _M_before_begin._M_nxt = nullptr;
2606 std::size_t __bbegin_bkt = 0;
2607 std::size_t __prev_bkt = 0;
2608 __node_ptr __prev_p = nullptr;
2609 bool __check_bucket = false;
2610
2611 while (__p)
2612 {
2613 __node_ptr __next = __p->_M_next();
2614 std::size_t __bkt
2615 = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2616
2617 if (__prev_p && __prev_bkt == __bkt)
2618 {
2619 // Previous insert was already in this bucket, we insert after
2620 // the previously inserted one to preserve equivalent elements
2621 // relative order.
2622 __p->_M_nxt = __prev_p->_M_nxt;
2623 __prev_p->_M_nxt = __p;
2624
2625 // Inserting after a node in a bucket require to check that we
2626 // haven't change the bucket last node, in this case next
2627 // bucket containing its before begin node must be updated. We
2628 // schedule a check as soon as we move out of the sequence of
2629 // equivalent nodes to limit the number of checks.
2630 __check_bucket = true;
2631 }
2632 else
2633 {
2634 if (__check_bucket)
2635 {
2636 // Check if we shall update the next bucket because of
2637 // insertions into __prev_bkt bucket.
2638 if (__prev_p->_M_nxt)
2639 {
2640 std::size_t __next_bkt
2641 = __hash_code_base::_M_bucket_index(
2642 *__prev_p->_M_next(), __bkt_count);
2643 if (__next_bkt != __prev_bkt)
2644 __new_buckets[__next_bkt] = __prev_p;
2645 }
2646 __check_bucket = false;
2647 }
2648
2649 if (!__new_buckets[__bkt])
2650 {
2651 __p->_M_nxt = _M_before_begin._M_nxt;
2652 _M_before_begin._M_nxt = __p;
2653 __new_buckets[__bkt] = &_M_before_begin;
2654 if (__p->_M_nxt)
2655 __new_buckets[__bbegin_bkt] = __p;
2656 __bbegin_bkt = __bkt;
2657 }
2658 else
2659 {
2660 __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2661 __new_buckets[__bkt]->_M_nxt = __p;
2662 }
2663 }
2664 __prev_p = __p;
2665 __prev_bkt = __bkt;
2666 __p = __next;
2667 }
2668
2669 if (__check_bucket && __prev_p->_M_nxt)
2670 {
2671 std::size_t __next_bkt
2672 = __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
2673 __bkt_count);
2674 if (__next_bkt != __prev_bkt)
2675 __new_buckets[__next_bkt] = __prev_p;
2676 }
2677
2678 _M_deallocate_buckets();
2679 _M_bucket_count = __bkt_count;
2680 _M_buckets = __new_buckets;
2681 }
2682
2683#if __cplusplus > 201402L
2684 template<typename, typename, typename> class _Hash_merge_helper { };
2685#endif // C++17
2686
2687#if __cpp_deduction_guides >= 201606
2688 // Used to constrain deduction guides
2689 template<typename _Hash>
2690 using _RequireNotAllocatorOrIntegral
2691 = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
2692#endif
2693
2694/// @endcond
2695_GLIBCXX_END_NAMESPACE_VERSION
2696} // namespace std
2697
2698#endif // _HASHTABLE_H
integral_constant< bool, true > true_type
The type used as a compile-time boolean with true value.
Definition: std/type_traits:82
integral_constant< bool, false > false_type
The type used as a compile-time boolean with false value.
Definition: std/type_traits:85
constexpr std::remove_reference< _Tp >::type && move(_Tp &&__t) noexcept
Convert a value to an rvalue.
Definition: move.h:104
void swap(any &__x, any &__y) noexcept
Exchange the states of two any objects.
Definition: std/any:429
constexpr _Tp * __addressof(_Tp &__r) noexcept
Same as C++11 std::addressof.
Definition: move.h:49
constexpr _Tp && forward(typename std::remove_reference< _Tp >::type &__t) noexcept
Forward an lvalue.
Definition: move.h:77
_Tp * end(valarray< _Tp > &__va) noexcept
Return an iterator pointing to one past the last element of the valarray.
Definition: valarray:1237
_Tp * begin(valarray< _Tp > &__va) noexcept
Return an iterator pointing to the first element of the valarray.
Definition: valarray:1215
constexpr const _Tp & max(const _Tp &, const _Tp &)
This does what you think it does.
Definition: stl_algobase.h:254
void swap(shared_lock< _Mutex > &__x, shared_lock< _Mutex > &__y) noexcept
Swap specialization for shared_lock.
Definition: shared_mutex:851
ISO C++ entities toplevel namespace is std.
constexpr iterator_traits< _InputIterator >::difference_type distance(_InputIterator __first, _InputIterator __last)
A generalization of pointer arithmetic.
constexpr auto cend(const _Container &__cont) noexcept(noexcept(std::end(__cont))) -> decltype(std::end(__cont))
Return an iterator pointing to one past the last element of the const container.
Definition: range_access.h:138
constexpr auto empty(const _Container &__cont) noexcept(noexcept(__cont.empty())) -> decltype(__cont.empty())
Return whether a container is empty.
Definition: range_access.h:283
constexpr auto size(const _Container &__cont) noexcept(noexcept(__cont.size())) -> decltype(__cont.size())
Return the size of a container.
Definition: range_access.h:264
constexpr auto cbegin(const _Container &__cont) noexcept(noexcept(std::begin(__cont))) -> decltype(std::begin(__cont))
Return an iterator pointing to the first element of the const container.
Definition: range_access.h:126
constexpr const _Tp * begin(initializer_list< _Tp > __ils) noexcept
Return an iterator pointing to the first element of the initializer_list.
constexpr const _Tp * end(initializer_list< _Tp > __ils) noexcept
Return an iterator pointing to one past the last element of the initializer_list.
Struct holding two objects of arbitrary type.
Definition: stl_pair.h:187
_T1 first
The first member.
Definition: stl_pair.h:191
_T2 second
The second member.
Definition: stl_pair.h:192