libstdc++
bits/hashtable.h
Go to the documentation of this file.
1 // hashtable.h header -*- C++ -*-
2 
3 // Copyright (C) 2007-2020 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/hashtable.h
26  * This is an internal header file, included by other library headers.
27  * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
28  */
29 
30 #ifndef _HASHTABLE_H
31 #define _HASHTABLE_H 1
32 
33 #pragma GCC system_header
34 
35 #include <bits/hashtable_policy.h>
36 #if __cplusplus > 201402L
37 # include <bits/node_handle.h>
38 #endif
39 
40 namespace std _GLIBCXX_VISIBILITY(default)
41 {
42 _GLIBCXX_BEGIN_NAMESPACE_VERSION
43 
44  template<typename _Tp, typename _Hash>
45  using __cache_default
46  = __not_<__and_<// Do not cache for fast hasher.
47  __is_fast_hash<_Hash>,
48  // Mandatory to have erase not throwing.
49  __is_nothrow_invocable<const _Hash&, const _Tp&>>>;
50 
51  /**
52  * Primary class template _Hashtable.
53  *
54  * @ingroup hashtable-detail
55  *
56  * @tparam _Value CopyConstructible type.
57  *
58  * @tparam _Key CopyConstructible type.
59  *
60  * @tparam _Alloc An allocator type
61  * ([lib.allocator.requirements]) whose _Alloc::value_type is
62  * _Value. As a conforming extension, we allow for
63  * _Alloc::value_type != _Value.
64  *
65  * @tparam _ExtractKey Function object that takes an object of type
66  * _Value and returns a value of type _Key.
67  *
68  * @tparam _Equal Function object that takes two objects of type k
69  * and returns a bool-like value that is true if the two objects
70  * are considered equal.
71  *
72  * @tparam _Hash The hash function. A unary function object with
73  * argument type _Key and result type size_t. Return values should
74  * be distributed over the entire range [0, numeric_limits<size_t>:::max()].
75  *
76  * @tparam _RangeHash The range-hashing function (in the terminology of
77  * Tavori and Dreizin). A binary function object whose argument
78  * types and result type are all size_t. Given arguments r and N,
79  * the return value is in the range [0, N).
80  *
81  * @tparam _Unused Not used.
82  *
83  * @tparam _RehashPolicy Policy class with three members, all of
84  * which govern the bucket count. _M_next_bkt(n) returns a bucket
85  * count no smaller than n. _M_bkt_for_elements(n) returns a
86  * bucket count appropriate for an element count of n.
87  * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
88  * current bucket count is n_bkt and the current element count is
89  * n_elt, we need to increase the bucket count for n_ins insertions.
90  * If so, returns make_pair(true, n), where n is the new bucket count. If
91  * not, returns make_pair(false, <anything>)
92  *
93  * @tparam _Traits Compile-time class with three boolean
94  * std::integral_constant members: __cache_hash_code, __constant_iterators,
95  * __unique_keys.
96  *
97  * Each _Hashtable data structure has:
98  *
99  * - _Bucket[] _M_buckets
100  * - _Hash_node_base _M_before_begin
101  * - size_type _M_bucket_count
102  * - size_type _M_element_count
103  *
104  * with _Bucket being _Hash_node_base* and _Hash_node containing:
105  *
106  * - _Hash_node* _M_next
107  * - Tp _M_value
108  * - size_t _M_hash_code if cache_hash_code is true
109  *
110  * In terms of Standard containers the hashtable is like the aggregation of:
111  *
112  * - std::forward_list<_Node> containing the elements
113  * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
114  *
115  * The non-empty buckets contain the node before the first node in the
116  * bucket. This design makes it possible to implement something like a
117  * std::forward_list::insert_after on container insertion and
118  * std::forward_list::erase_after on container erase
119  * calls. _M_before_begin is equivalent to
120  * std::forward_list::before_begin. Empty buckets contain
121  * nullptr. Note that one of the non-empty buckets contains
122  * &_M_before_begin which is not a dereferenceable node so the
123  * node pointer in a bucket shall never be dereferenced, only its
124  * next node can be.
125  *
126  * Walking through a bucket's nodes requires a check on the hash code to
127  * see if each node is still in the bucket. Such a design assumes a
128  * quite efficient hash functor and is one of the reasons it is
129  * highly advisable to set __cache_hash_code to true.
130  *
131  * The container iterators are simply built from nodes. This way
132  * incrementing the iterator is perfectly efficient independent of
133  * how many empty buckets there are in the container.
134  *
135  * On insert we compute the element's hash code and use it to find the
136  * bucket index. If the element must be inserted in an empty bucket
137  * we add it at the beginning of the singly linked list and make the
138  * bucket point to _M_before_begin. The bucket that used to point to
139  * _M_before_begin, if any, is updated to point to its new before
140  * begin node.
141  *
142  * On erase, the simple iterator design requires using the hash
143  * functor to get the index of the bucket to update. For this
144  * reason, when __cache_hash_code is set to false the hash functor must
145  * not throw and this is enforced by a static assertion.
146  *
147  * Functionality is implemented by decomposition into base classes,
148  * where the derived _Hashtable class is used in _Map_base,
149  * _Insert, _Rehash_base, and _Equality base classes to access the
150  * "this" pointer. _Hashtable_base is used in the base classes as a
151  * non-recursive, fully-completed-type so that detailed nested type
152  * information, such as iterator type and node type, can be
153  * used. This is similar to the "Curiously Recurring Template
154  * Pattern" (CRTP) technique, but uses a reconstructed, not
155  * explicitly passed, template pattern.
156  *
157  * Base class templates are:
158  * - __detail::_Hashtable_base
159  * - __detail::_Map_base
160  * - __detail::_Insert
161  * - __detail::_Rehash_base
162  * - __detail::_Equality
163  */
164  template<typename _Key, typename _Value, typename _Alloc,
165  typename _ExtractKey, typename _Equal,
166  typename _Hash, typename _RangeHash, typename _Unused,
167  typename _RehashPolicy, typename _Traits>
169  : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
170  _Hash, _RangeHash, _Unused, _Traits>,
171  public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
172  _Hash, _RangeHash, _Unused,
173  _RehashPolicy, _Traits>,
174  public __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey, _Equal,
175  _Hash, _RangeHash, _Unused,
176  _RehashPolicy, _Traits>,
177  public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
178  _Hash, _RangeHash, _Unused,
179  _RehashPolicy, _Traits>,
180  public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal,
181  _Hash, _RangeHash, _Unused,
182  _RehashPolicy, _Traits>,
184  __alloc_rebind<_Alloc,
185  __detail::_Hash_node<_Value,
186  _Traits::__hash_cached::value>>>
187  {
188  static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
189  "unordered container must have a non-const, non-volatile value_type");
190 #if __cplusplus > 201703L || defined __STRICT_ANSI__
192  "unordered container must have the same value_type as its allocator");
193 #endif
194 
195  using __traits_type = _Traits;
196  using __hash_cached = typename __traits_type::__hash_cached;
197  using __constant_iterators = typename __traits_type::__constant_iterators;
199  using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
200 
202 
203  using __node_value_type =
204  __detail::_Hash_node_value<_Value, __hash_cached::value>;
205  using __node_ptr = typename __hashtable_alloc::__node_ptr;
206  using __value_alloc_traits =
207  typename __hashtable_alloc::__value_alloc_traits;
208  using __node_alloc_traits =
210  using __node_base = typename __hashtable_alloc::__node_base;
211  using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
212  using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
213 
214  using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
215  _Equal, _Hash,
216  _RangeHash, _Unused,
217  _RehashPolicy, _Traits>;
218 
219  public:
220  typedef _Key key_type;
221  typedef _Value value_type;
222  typedef _Alloc allocator_type;
223  typedef _Equal key_equal;
224 
225  // mapped_type, if present, comes from _Map_base.
226  // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
227  typedef typename __value_alloc_traits::pointer pointer;
228  typedef typename __value_alloc_traits::const_pointer const_pointer;
229  typedef value_type& reference;
230  typedef const value_type& const_reference;
231 
232  using iterator = typename __insert_base::iterator;
233 
234  using const_iterator = typename __insert_base::const_iterator;
235 
236  using local_iterator = __detail::_Local_iterator<key_type, _Value,
237  _ExtractKey, _Hash, _RangeHash, _Unused,
238  __constant_iterators::value,
239  __hash_cached::value>;
240 
242  key_type, _Value,
243  _ExtractKey, _Hash, _RangeHash, _Unused,
244  __constant_iterators::value, __hash_cached::value>;
245 
246  private:
247  using __rehash_type = _RehashPolicy;
248  using __rehash_state = typename __rehash_type::_State;
249 
250  using __unique_keys = typename __traits_type::__unique_keys;
251 
252  using __hashtable_base = __detail::
253  _Hashtable_base<_Key, _Value, _ExtractKey,
254  _Equal, _Hash, _RangeHash, _Unused, _Traits>;
255 
256  using __hash_code_base = typename __hashtable_base::__hash_code_base;
257  using __hash_code = typename __hashtable_base::__hash_code;
258  using __ireturn_type = typename __insert_base::__ireturn_type;
259 
260  using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
261  _Equal, _Hash, _RangeHash, _Unused,
262  _RehashPolicy, _Traits>;
263 
264  using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
265  _ExtractKey, _Equal,
266  _Hash, _RangeHash, _Unused,
267  _RehashPolicy, _Traits>;
268 
269  using __eq_base = __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey,
270  _Equal, _Hash, _RangeHash, _Unused,
271  _RehashPolicy, _Traits>;
272 
273  using __reuse_or_alloc_node_gen_t =
274  __detail::_ReuseOrAllocNode<__node_alloc_type>;
275  using __alloc_node_gen_t =
276  __detail::_AllocNode<__node_alloc_type>;
277 
278  // Simple RAII type for managing a node containing an element
279  struct _Scoped_node
280  {
281  // Take ownership of a node with a constructed element.
282  _Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
283  : _M_h(__h), _M_node(__n) { }
284 
285  // Allocate a node and construct an element within it.
286  template<typename... _Args>
287  _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
288  : _M_h(__h),
289  _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
290  { }
291 
292  // Destroy element and deallocate node.
293  ~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };
294 
295  _Scoped_node(const _Scoped_node&) = delete;
296  _Scoped_node& operator=(const _Scoped_node&) = delete;
297 
298  __hashtable_alloc* _M_h;
299  __node_ptr _M_node;
300  };
301 
302  template<typename _Ht>
303  static constexpr
305  const value_type&, value_type&&>::type
306  __fwd_value_for(value_type& __val) noexcept
307  { return std::move(__val); }
308 
309  // Compile-time diagnostics.
310 
311  // _Hash_code_base has everything protected, so use this derived type to
312  // access it.
313  struct __hash_code_base_access : __hash_code_base
314  { using __hash_code_base::_M_bucket_index; };
315 
316  // Getting a bucket index from a node shall not throw because it is used
317  // in methods (erase, swap...) that shall not throw.
318  static_assert(noexcept(declval<const __hash_code_base_access&>()
319  ._M_bucket_index(declval<const __node_value_type&>(),
320  (std::size_t)0)),
321  "Cache the hash code or qualify your functors involved"
322  " in hash code and bucket index computation with noexcept");
323 
324  // To get bucket index we need _RangeHash not to throw.
326  "Functor used to map hash code to bucket index"
327  " must be nothrow default constructible");
328  static_assert(noexcept(
329  std::declval<const _RangeHash&>()((std::size_t)0, (std::size_t)0)),
330  "Functor used to map hash code to bucket index must be"
331  " noexcept");
332 
333  // To compute bucket index we also need _ExtratKey not to throw.
335  "_ExtractKey must be nothrow default constructible");
336  static_assert(noexcept(
337  std::declval<const _ExtractKey&>()(std::declval<_Value>())),
338  "_ExtractKey functor must be noexcept invocable");
339 
340  template<typename _Keya, typename _Valuea, typename _Alloca,
341  typename _ExtractKeya, typename _Equala,
342  typename _Hasha, typename _RangeHasha, typename _Unuseda,
343  typename _RehashPolicya, typename _Traitsa,
344  bool _Unique_keysa>
345  friend struct __detail::_Map_base;
346 
347  template<typename _Keya, typename _Valuea, typename _Alloca,
348  typename _ExtractKeya, typename _Equala,
349  typename _Hasha, typename _RangeHasha, typename _Unuseda,
350  typename _RehashPolicya, typename _Traitsa>
351  friend struct __detail::_Insert_base;
352 
353  template<typename _Keya, typename _Valuea, typename _Alloca,
354  typename _ExtractKeya, typename _Equala,
355  typename _Hasha, typename _RangeHasha, typename _Unuseda,
356  typename _RehashPolicya, typename _Traitsa,
357  bool _Constant_iteratorsa>
358  friend struct __detail::_Insert;
359 
360  template<typename _Keya, typename _Valuea, typename _Alloca,
361  typename _ExtractKeya, typename _Equala,
362  typename _Hasha, typename _RangeHasha, typename _Unuseda,
363  typename _RehashPolicya, typename _Traitsa,
364  bool _Unique_keysa>
365  friend struct __detail::_Equality;
366 
367  public:
368  using size_type = typename __hashtable_base::size_type;
369  using difference_type = typename __hashtable_base::difference_type;
370 
371 #if __cplusplus > 201402L
374 #endif
375 
376  private:
377  __buckets_ptr _M_buckets = &_M_single_bucket;
378  size_type _M_bucket_count = 1;
379  __node_base _M_before_begin;
380  size_type _M_element_count = 0;
381  _RehashPolicy _M_rehash_policy;
382 
383  // A single bucket used when only need for 1 bucket. Especially
384  // interesting in move semantic to leave hashtable with only 1 bucket
385  // which is not allocated so that we can have those operations noexcept
386  // qualified.
387  // Note that we can't leave hashtable with 0 bucket without adding
388  // numerous checks in the code to avoid 0 modulus.
389  __node_base_ptr _M_single_bucket = nullptr;
390 
391  void
392  _M_update_bbegin()
393  {
394  if (_M_begin())
395  _M_buckets[_M_bucket_index(*_M_begin())] = &_M_before_begin;
396  }
397 
398  void
399  _M_update_bbegin(__node_ptr __n)
400  {
401  _M_before_begin._M_nxt = __n;
402  _M_update_bbegin();
403  }
404 
405  bool
406  _M_uses_single_bucket(__buckets_ptr __bkts) const
407  { return __builtin_expect(__bkts == &_M_single_bucket, false); }
408 
409  bool
410  _M_uses_single_bucket() const
411  { return _M_uses_single_bucket(_M_buckets); }
412 
414  _M_base_alloc() { return *this; }
415 
416  __buckets_ptr
417  _M_allocate_buckets(size_type __bkt_count)
418  {
419  if (__builtin_expect(__bkt_count == 1, false))
420  {
421  _M_single_bucket = nullptr;
422  return &_M_single_bucket;
423  }
424 
425  return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
426  }
427 
428  void
429  _M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
430  {
431  if (_M_uses_single_bucket(__bkts))
432  return;
433 
434  __hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
435  }
436 
437  void
438  _M_deallocate_buckets()
439  { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }
440 
441  // Gets bucket begin, deals with the fact that non-empty buckets contain
442  // their before begin node.
443  __node_ptr
444  _M_bucket_begin(size_type __bkt) const;
445 
446  __node_ptr
447  _M_begin() const
448  { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
449 
450  // Assign *this using another _Hashtable instance. Whether elements
451  // are copied or moved depends on the _Ht reference.
452  template<typename _Ht>
453  void
454  _M_assign_elements(_Ht&&);
455 
456  template<typename _Ht, typename _NodeGenerator>
457  void
458  _M_assign(_Ht&&, const _NodeGenerator&);
459 
460  void
461  _M_move_assign(_Hashtable&&, true_type);
462 
463  void
464  _M_move_assign(_Hashtable&&, false_type);
465 
466  void
467  _M_reset() noexcept;
468 
469  _Hashtable(const _Hash& __h, const _Equal& __eq,
470  const allocator_type& __a)
471  : __hashtable_base(__h, __eq),
472  __hashtable_alloc(__node_alloc_type(__a))
473  { }
474 
475  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
476  true_type /* alloc always equal */)
479 
480  _Hashtable(_Hashtable&&, __node_alloc_type&&,
481  false_type /* alloc always equal */);
482 
483  template<typename _InputIterator>
484  _Hashtable(_InputIterator __first, _InputIterator __last,
485  size_type __bkt_count_hint,
486  const _Hash&, const _Equal&, const allocator_type&,
487  true_type __uks);
488 
489  template<typename _InputIterator>
490  _Hashtable(_InputIterator __first, _InputIterator __last,
491  size_type __bkt_count_hint,
492  const _Hash&, const _Equal&, const allocator_type&,
493  false_type __uks);
494 
495  public:
496  // Constructor, destructor, assignment, swap
497  _Hashtable() = default;
498 
499  _Hashtable(const _Hashtable&);
500 
501  _Hashtable(const _Hashtable&, const allocator_type&);
502 
503  explicit
504  _Hashtable(size_type __bkt_count_hint,
505  const _Hash& __hf = _Hash(),
506  const key_equal& __eql = key_equal(),
507  const allocator_type& __a = allocator_type());
508 
509  // Use delegating constructors.
510  _Hashtable(_Hashtable&& __ht)
511  noexcept( noexcept(
512  _Hashtable(std::declval<_Hashtable>(),
513  std::declval<__node_alloc_type>(),
514  true_type{})) )
515  : _Hashtable(std::move(__ht), std::move(__ht._M_node_allocator()),
516  true_type{})
517  { }
518 
519  _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
520  noexcept( noexcept(
521  _Hashtable(std::declval<_Hashtable>(),
522  std::declval<__node_alloc_type>(),
523  typename __node_alloc_traits::is_always_equal{})) )
524  : _Hashtable(std::move(__ht), __node_alloc_type(__a),
525  typename __node_alloc_traits::is_always_equal{})
526  { }
527 
528  explicit
529  _Hashtable(const allocator_type& __a)
530  : __hashtable_alloc(__node_alloc_type(__a))
531  { }
532 
533  template<typename _InputIterator>
534  _Hashtable(_InputIterator __f, _InputIterator __l,
535  size_type __bkt_count_hint = 0,
536  const _Hash& __hf = _Hash(),
537  const key_equal& __eql = key_equal(),
538  const allocator_type& __a = allocator_type())
539  : _Hashtable(__f, __l, __bkt_count_hint, __hf, __eql, __a,
540  __unique_keys{})
541  { }
542 
544  size_type __bkt_count_hint = 0,
545  const _Hash& __hf = _Hash(),
546  const key_equal& __eql = key_equal(),
547  const allocator_type& __a = allocator_type())
548  : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
549  __hf, __eql, __a, __unique_keys{})
550  { }
551 
552  _Hashtable&
553  operator=(const _Hashtable& __ht);
554 
555  _Hashtable&
556  operator=(_Hashtable&& __ht)
557  noexcept(__node_alloc_traits::_S_nothrow_move()
560  {
561  constexpr bool __move_storage =
562  __node_alloc_traits::_S_propagate_on_move_assign()
563  || __node_alloc_traits::_S_always_equal();
564  _M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
565  return *this;
566  }
567 
568  _Hashtable&
570  {
571  __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
572  _M_before_begin._M_nxt = nullptr;
573  clear();
574 
575  // We consider that all elements of __l are going to be inserted.
576  auto __l_bkt_count = _M_rehash_policy._M_bkt_for_elements(__l.size());
577 
578  // Do not shrink to keep potential user reservation.
579  if (_M_bucket_count < __l_bkt_count)
580  rehash(__l_bkt_count);
581 
582  this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys{});
583  return *this;
584  }
585 
586  ~_Hashtable() noexcept;
587 
588  void
589  swap(_Hashtable&)
590  noexcept(__and_<__is_nothrow_swappable<_Hash>,
591  __is_nothrow_swappable<_Equal>>::value);
592 
593  // Basic container operations
594  iterator
595  begin() noexcept
596  { return iterator(_M_begin()); }
597 
598  const_iterator
599  begin() const noexcept
600  { return const_iterator(_M_begin()); }
601 
602  iterator
603  end() noexcept
604  { return iterator(nullptr); }
605 
606  const_iterator
607  end() const noexcept
608  { return const_iterator(nullptr); }
609 
610  const_iterator
611  cbegin() const noexcept
612  { return const_iterator(_M_begin()); }
613 
614  const_iterator
615  cend() const noexcept
616  { return const_iterator(nullptr); }
617 
618  size_type
619  size() const noexcept
620  { return _M_element_count; }
621 
622  _GLIBCXX_NODISCARD bool
623  empty() const noexcept
624  { return size() == 0; }
625 
626  allocator_type
627  get_allocator() const noexcept
628  { return allocator_type(this->_M_node_allocator()); }
629 
630  size_type
631  max_size() const noexcept
632  { return __node_alloc_traits::max_size(this->_M_node_allocator()); }
633 
634  // Observers
635  key_equal
636  key_eq() const
637  { return this->_M_eq(); }
638 
639  // hash_function, if present, comes from _Hash_code_base.
640 
641  // Bucket operations
642  size_type
643  bucket_count() const noexcept
644  { return _M_bucket_count; }
645 
646  size_type
647  max_bucket_count() const noexcept
648  { return max_size(); }
649 
650  size_type
651  bucket_size(size_type __bkt) const
652  { return std::distance(begin(__bkt), end(__bkt)); }
653 
654  size_type
655  bucket(const key_type& __k) const
656  { return _M_bucket_index(this->_M_hash_code(__k)); }
657 
659  begin(size_type __bkt)
660  {
661  return local_iterator(*this, _M_bucket_begin(__bkt),
662  __bkt, _M_bucket_count);
663  }
664 
666  end(size_type __bkt)
667  { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
668 
670  begin(size_type __bkt) const
671  {
672  return const_local_iterator(*this, _M_bucket_begin(__bkt),
673  __bkt, _M_bucket_count);
674  }
675 
677  end(size_type __bkt) const
678  { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
679 
680  // DR 691.
682  cbegin(size_type __bkt) const
683  {
684  return const_local_iterator(*this, _M_bucket_begin(__bkt),
685  __bkt, _M_bucket_count);
686  }
687 
689  cend(size_type __bkt) const
690  { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
691 
692  float
693  load_factor() const noexcept
694  {
695  return static_cast<float>(size()) / static_cast<float>(bucket_count());
696  }
697 
698  // max_load_factor, if present, comes from _Rehash_base.
699 
700  // Generalization of max_load_factor. Extension, not found in
701  // TR1. Only useful if _RehashPolicy is something other than
702  // the default.
703  const _RehashPolicy&
704  __rehash_policy() const
705  { return _M_rehash_policy; }
706 
707  void
708  __rehash_policy(const _RehashPolicy& __pol)
709  { _M_rehash_policy = __pol; }
710 
711  // Lookup.
712  iterator
713  find(const key_type& __k);
714 
715  const_iterator
716  find(const key_type& __k) const;
717 
718  size_type
719  count(const key_type& __k) const;
720 
722  equal_range(const key_type& __k);
723 
725  equal_range(const key_type& __k) const;
726 
727  private:
728  // Bucket index computation helpers.
729  size_type
730  _M_bucket_index(const __node_value_type& __n) const noexcept
731  { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
732 
733  size_type
734  _M_bucket_index(__hash_code __c) const
735  { return __hash_code_base::_M_bucket_index(__c, _M_bucket_count); }
736 
737  // Find and insert helper functions and types
738  // Find the node before the one matching the criteria.
739  __node_base_ptr
740  _M_find_before_node(size_type, const key_type&, __hash_code) const;
741 
742  __node_ptr
743  _M_find_node(size_type __bkt, const key_type& __key,
744  __hash_code __c) const
745  {
746  __node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
747  if (__before_n)
748  return static_cast<__node_ptr>(__before_n->_M_nxt);
749  return nullptr;
750  }
751 
752  // Insert a node at the beginning of a bucket.
753  void
754  _M_insert_bucket_begin(size_type, __node_ptr);
755 
756  // Remove the bucket first node
757  void
758  _M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
759  size_type __next_bkt);
760 
761  // Get the node before __n in the bucket __bkt
762  __node_base_ptr
763  _M_get_previous_node(size_type __bkt, __node_ptr __n);
764 
765  // Insert node __n with hash code __code, in bucket __bkt if no
766  // rehash (assumes no element with same key already present).
767  // Takes ownership of __n if insertion succeeds, throws otherwise.
768  iterator
769  _M_insert_unique_node(size_type __bkt, __hash_code,
770  __node_ptr __n, size_type __n_elt = 1);
771 
772  // Insert node __n with key __k and hash code __code.
773  // Takes ownership of __n if insertion succeeds, throws otherwise.
774  iterator
775  _M_insert_multi_node(__node_ptr __hint,
776  __hash_code __code, __node_ptr __n);
777 
778  template<typename... _Args>
780  _M_emplace(true_type __uks, _Args&&... __args);
781 
782  template<typename... _Args>
783  iterator
784  _M_emplace(false_type __uks, _Args&&... __args)
785  { return _M_emplace(cend(), __uks, std::forward<_Args>(__args)...); }
786 
787  // Emplace with hint, useless when keys are unique.
788  template<typename... _Args>
789  iterator
790  _M_emplace(const_iterator, true_type __uks, _Args&&... __args)
791  { return _M_emplace(__uks, std::forward<_Args>(__args)...).first; }
792 
793  template<typename... _Args>
794  iterator
795  _M_emplace(const_iterator, false_type __uks, _Args&&... __args);
796 
797  template<typename _Arg, typename _NodeGenerator>
799  _M_insert(_Arg&&, const _NodeGenerator&, true_type __uks);
800 
801  template<typename _Arg, typename _NodeGenerator>
802  iterator
803  _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
804  false_type __uks)
805  {
806  return _M_insert(cend(), std::forward<_Arg>(__arg), __node_gen,
807  __uks);
808  }
809 
810  // Insert with hint, not used when keys are unique.
811  template<typename _Arg, typename _NodeGenerator>
812  iterator
813  _M_insert(const_iterator, _Arg&& __arg,
814  const _NodeGenerator& __node_gen, true_type __uks)
815  {
816  return
817  _M_insert(std::forward<_Arg>(__arg), __node_gen, __uks).first;
818  }
819 
820  // Insert with hint when keys are not unique.
821  template<typename _Arg, typename _NodeGenerator>
822  iterator
823  _M_insert(const_iterator, _Arg&&,
824  const _NodeGenerator&, false_type __uks);
825 
826  size_type
827  _M_erase(true_type __uks, const key_type&);
828 
829  size_type
830  _M_erase(false_type __uks, const key_type&);
831 
832  iterator
833  _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
834 
835  public:
836  // Emplace
837  template<typename... _Args>
838  __ireturn_type
839  emplace(_Args&&... __args)
840  { return _M_emplace(__unique_keys{}, std::forward<_Args>(__args)...); }
841 
842  template<typename... _Args>
843  iterator
844  emplace_hint(const_iterator __hint, _Args&&... __args)
845  {
846  return _M_emplace(__hint, __unique_keys{},
847  std::forward<_Args>(__args)...);
848  }
849 
850  // Insert member functions via inheritance.
851 
852  // Erase
853  iterator
854  erase(const_iterator);
855 
856  // LWG 2059.
857  iterator
858  erase(iterator __it)
859  { return erase(const_iterator(__it)); }
860 
861  size_type
862  erase(const key_type& __k)
863  { return _M_erase(__unique_keys{}, __k); }
864 
865  iterator
866  erase(const_iterator, const_iterator);
867 
868  void
869  clear() noexcept;
870 
871  // Set number of buckets keeping it appropriate for container's number
872  // of elements.
873  void rehash(size_type __bkt_count);
874 
875  // DR 1189.
876  // reserve, if present, comes from _Rehash_base.
877 
878 #if __cplusplus > 201402L
879  /// Re-insert an extracted node into a container with unique keys.
882  {
883  insert_return_type __ret;
884  if (__nh.empty())
885  __ret.position = end();
886  else
887  {
888  __glibcxx_assert(get_allocator() == __nh.get_allocator());
889 
890  const key_type& __k = __nh._M_key();
891  __hash_code __code = this->_M_hash_code(__k);
892  size_type __bkt = _M_bucket_index(__code);
893  if (__node_ptr __n = _M_find_node(__bkt, __k, __code))
894  {
895  __ret.node = std::move(__nh);
896  __ret.position = iterator(__n);
897  __ret.inserted = false;
898  }
899  else
900  {
901  __ret.position
902  = _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
903  __nh._M_ptr = nullptr;
904  __ret.inserted = true;
905  }
906  }
907  return __ret;
908  }
909 
910  /// Re-insert an extracted node into a container with equivalent keys.
911  iterator
912  _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
913  {
914  if (__nh.empty())
915  return end();
916 
917  __glibcxx_assert(get_allocator() == __nh.get_allocator());
918 
919  const key_type& __k = __nh._M_key();
920  auto __code = this->_M_hash_code(__k);
921  auto __ret
922  = _M_insert_multi_node(__hint._M_cur, __code, __nh._M_ptr);
923  __nh._M_ptr = nullptr;
924  return __ret;
925  }
926 
927  private:
928  node_type
929  _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
930  {
931  __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
932  if (__prev_n == _M_buckets[__bkt])
933  _M_remove_bucket_begin(__bkt, __n->_M_next(),
934  __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
935  else if (__n->_M_nxt)
936  {
937  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
938  if (__next_bkt != __bkt)
939  _M_buckets[__next_bkt] = __prev_n;
940  }
941 
942  __prev_n->_M_nxt = __n->_M_nxt;
943  __n->_M_nxt = nullptr;
944  --_M_element_count;
945  return { __n, this->_M_node_allocator() };
946  }
947 
948  public:
949  // Extract a node.
950  node_type
951  extract(const_iterator __pos)
952  {
953  size_t __bkt = _M_bucket_index(*__pos._M_cur);
954  return _M_extract_node(__bkt,
955  _M_get_previous_node(__bkt, __pos._M_cur));
956  }
957 
958  /// Extract a node.
959  node_type
960  extract(const _Key& __k)
961  {
962  node_type __nh;
963  __hash_code __code = this->_M_hash_code(__k);
964  std::size_t __bkt = _M_bucket_index(__code);
965  if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
966  __nh = _M_extract_node(__bkt, __prev_node);
967  return __nh;
968  }
969 
970  /// Merge from a compatible container into one with unique keys.
971  template<typename _Compatible_Hashtable>
972  void
973  _M_merge_unique(_Compatible_Hashtable& __src) noexcept
974  {
975  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
976  node_type>, "Node types are compatible");
977  __glibcxx_assert(get_allocator() == __src.get_allocator());
978 
979  auto __n_elt = __src.size();
980  for (auto __i = __src.begin(), __end = __src.end(); __i != __end;)
981  {
982  auto __pos = __i++;
983  const key_type& __k = _ExtractKey{}(*__pos);
984  __hash_code __code = this->_M_hash_code(__k);
985  size_type __bkt = _M_bucket_index(__code);
986  if (_M_find_node(__bkt, __k, __code) == nullptr)
987  {
988  auto __nh = __src.extract(__pos);
989  _M_insert_unique_node(__bkt, __code, __nh._M_ptr, __n_elt);
990  __nh._M_ptr = nullptr;
991  __n_elt = 1;
992  }
993  else if (__n_elt != 1)
994  --__n_elt;
995  }
996  }
997 
998  /// Merge from a compatible container into one with equivalent keys.
999  template<typename _Compatible_Hashtable>
1000  void
1001  _M_merge_multi(_Compatible_Hashtable& __src) noexcept
1002  {
1003  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1004  node_type>, "Node types are compatible");
1005  __glibcxx_assert(get_allocator() == __src.get_allocator());
1006 
1007  this->reserve(size() + __src.size());
1008  for (auto __i = __src.begin(), __end = __src.end(); __i != __end;)
1009  _M_reinsert_node_multi(cend(), __src.extract(__i++));
1010  }
1011 #endif // C++17
1012 
1013  private:
1014  // Helper rehash method used when keys are unique.
1015  void _M_rehash_aux(size_type __bkt_count, true_type __uks);
1016 
1017  // Helper rehash method used when keys can be non-unique.
1018  void _M_rehash_aux(size_type __bkt_count, false_type __uks);
1019 
1020  // Unconditionally change size of bucket array to n, restore
1021  // hash policy state to __state on exception.
1022  void _M_rehash(size_type __bkt_count, const __rehash_state& __state);
1023  };
1024 
1025 
1026  // Definitions of class template _Hashtable's out-of-line member functions.
1027  template<typename _Key, typename _Value, typename _Alloc,
1028  typename _ExtractKey, typename _Equal,
1029  typename _Hash, typename _RangeHash, typename _Unused,
1030  typename _RehashPolicy, typename _Traits>
1031  auto
1032  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1033  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1034  _M_bucket_begin(size_type __bkt) const
1035  -> __node_ptr
1036  {
1037  __node_base_ptr __n = _M_buckets[__bkt];
1038  return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
1039  }
1040 
1041  template<typename _Key, typename _Value, typename _Alloc,
1042  typename _ExtractKey, typename _Equal,
1043  typename _Hash, typename _RangeHash, typename _Unused,
1044  typename _RehashPolicy, typename _Traits>
1045  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1046  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1047  _Hashtable(size_type __bkt_count_hint,
1048  const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
1049  : _Hashtable(__h, __eq, __a)
1050  {
1051  auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
1052  if (__bkt_count > _M_bucket_count)
1053  {
1054  _M_buckets = _M_allocate_buckets(__bkt_count);
1055  _M_bucket_count = __bkt_count;
1056  }
1057  }
1058 
1059  template<typename _Key, typename _Value, typename _Alloc,
1060  typename _ExtractKey, typename _Equal,
1061  typename _Hash, typename _RangeHash, typename _Unused,
1062  typename _RehashPolicy, typename _Traits>
1063  template<typename _InputIterator>
1064  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1065  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1066  _Hashtable(_InputIterator __f, _InputIterator __l,
1067  size_type __bkt_count_hint,
1068  const _Hash& __h, const _Equal& __eq,
1069  const allocator_type& __a, true_type /* __uks */)
1070  : _Hashtable(__bkt_count_hint, __h, __eq, __a)
1071  {
1072  for (; __f != __l; ++__f)
1073  this->insert(*__f);
1074  }
1075 
1076  template<typename _Key, typename _Value, typename _Alloc,
1077  typename _ExtractKey, typename _Equal,
1078  typename _Hash, typename _RangeHash, typename _Unused,
1079  typename _RehashPolicy, typename _Traits>
1080  template<typename _InputIterator>
1081  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1082  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1083  _Hashtable(_InputIterator __f, _InputIterator __l,
1084  size_type __bkt_count_hint,
1085  const _Hash& __h, const _Equal& __eq,
1086  const allocator_type& __a, false_type /* __uks */)
1087  : _Hashtable(__h, __eq, __a)
1088  {
1089  auto __nb_elems = __detail::__distance_fw(__f, __l);
1090  auto __bkt_count =
1091  _M_rehash_policy._M_next_bkt(
1092  std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
1093  __bkt_count_hint));
1094 
1095  if (__bkt_count > _M_bucket_count)
1096  {
1097  _M_buckets = _M_allocate_buckets(__bkt_count);
1098  _M_bucket_count = __bkt_count;
1099  }
1100 
1101  for (; __f != __l; ++__f)
1102  this->insert(*__f);
1103  }
1104 
1105  template<typename _Key, typename _Value, typename _Alloc,
1106  typename _ExtractKey, typename _Equal,
1107  typename _Hash, typename _RangeHash, typename _Unused,
1108  typename _RehashPolicy, typename _Traits>
1109  auto
1110  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1111  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1112  operator=(const _Hashtable& __ht)
1113  -> _Hashtable&
1114  {
1115  if (&__ht == this)
1116  return *this;
1117 
1118  if (__node_alloc_traits::_S_propagate_on_copy_assign())
1119  {
1120  auto& __this_alloc = this->_M_node_allocator();
1121  auto& __that_alloc = __ht._M_node_allocator();
1122  if (!__node_alloc_traits::_S_always_equal()
1123  && __this_alloc != __that_alloc)
1124  {
1125  // Replacement allocator cannot free existing storage.
1126  this->_M_deallocate_nodes(_M_begin());
1127  _M_before_begin._M_nxt = nullptr;
1128  _M_deallocate_buckets();
1129  _M_buckets = nullptr;
1130  std::__alloc_on_copy(__this_alloc, __that_alloc);
1132  _M_bucket_count = __ht._M_bucket_count;
1133  _M_element_count = __ht._M_element_count;
1134  _M_rehash_policy = __ht._M_rehash_policy;
1135  __alloc_node_gen_t __alloc_node_gen(*this);
1136  __try
1137  {
1138  _M_assign(__ht, __alloc_node_gen);
1139  }
1140  __catch(...)
1141  {
1142  // _M_assign took care of deallocating all memory. Now we
1143  // must make sure this instance remains in a usable state.
1144  _M_reset();
1145  __throw_exception_again;
1146  }
1147  return *this;
1148  }
1149  std::__alloc_on_copy(__this_alloc, __that_alloc);
1150  }
1151 
1152  // Reuse allocated buckets and nodes.
1153  _M_assign_elements(__ht);
1154  return *this;
1155  }
1156 
1157  template<typename _Key, typename _Value, typename _Alloc,
1158  typename _ExtractKey, typename _Equal,
1159  typename _Hash, typename _RangeHash, typename _Unused,
1160  typename _RehashPolicy, typename _Traits>
1161  template<typename _Ht>
1162  void
1163  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1164  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1165  _M_assign_elements(_Ht&& __ht)
1166  {
1167  __buckets_ptr __former_buckets = nullptr;
1168  std::size_t __former_bucket_count = _M_bucket_count;
1169  const __rehash_state& __former_state = _M_rehash_policy._M_state();
1170 
1171  if (_M_bucket_count != __ht._M_bucket_count)
1172  {
1173  __former_buckets = _M_buckets;
1174  _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
1175  _M_bucket_count = __ht._M_bucket_count;
1176  }
1177  else
1178  __builtin_memset(_M_buckets, 0,
1179  _M_bucket_count * sizeof(__node_base_ptr));
1180 
1181  __try
1182  {
1183  __hashtable_base::operator=(std::forward<_Ht>(__ht));
1184  _M_element_count = __ht._M_element_count;
1185  _M_rehash_policy = __ht._M_rehash_policy;
1186  __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
1187  _M_before_begin._M_nxt = nullptr;
1188  _M_assign(std::forward<_Ht>(__ht), __roan);
1189  if (__former_buckets)
1190  _M_deallocate_buckets(__former_buckets, __former_bucket_count);
1191  }
1192  __catch(...)
1193  {
1194  if (__former_buckets)
1195  {
1196  // Restore previous buckets.
1197  _M_deallocate_buckets();
1198  _M_rehash_policy._M_reset(__former_state);
1199  _M_buckets = __former_buckets;
1200  _M_bucket_count = __former_bucket_count;
1201  }
1202  __builtin_memset(_M_buckets, 0,
1203  _M_bucket_count * sizeof(__node_base_ptr));
1204  __throw_exception_again;
1205  }
1206  }
1207 
1208  template<typename _Key, typename _Value, typename _Alloc,
1209  typename _ExtractKey, typename _Equal,
1210  typename _Hash, typename _RangeHash, typename _Unused,
1211  typename _RehashPolicy, typename _Traits>
1212  template<typename _Ht, typename _NodeGenerator>
1213  void
1214  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1215  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1216  _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
1217  {
1218  __buckets_ptr __buckets = nullptr;
1219  if (!_M_buckets)
1220  _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);
1221 
1222  __try
1223  {
1224  if (!__ht._M_before_begin._M_nxt)
1225  return;
1226 
1227  // First deal with the special first node pointed to by
1228  // _M_before_begin.
1229  __node_ptr __ht_n = __ht._M_begin();
1230  __node_ptr __this_n
1231  = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
1232  this->_M_copy_code(*__this_n, *__ht_n);
1233  _M_update_bbegin(__this_n);
1234 
1235  // Then deal with other nodes.
1236  __node_ptr __prev_n = __this_n;
1237  for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
1238  {
1239  __this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
1240  __prev_n->_M_nxt = __this_n;
1241  this->_M_copy_code(*__this_n, *__ht_n);
1242  size_type __bkt = _M_bucket_index(*__this_n);
1243  if (!_M_buckets[__bkt])
1244  _M_buckets[__bkt] = __prev_n;
1245  __prev_n = __this_n;
1246  }
1247  }
1248  __catch(...)
1249  {
1250  clear();
1251  if (__buckets)
1252  _M_deallocate_buckets();
1253  __throw_exception_again;
1254  }
1255  }
1256 
1257  template<typename _Key, typename _Value, typename _Alloc,
1258  typename _ExtractKey, typename _Equal,
1259  typename _Hash, typename _RangeHash, typename _Unused,
1260  typename _RehashPolicy, typename _Traits>
1261  void
1262  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1263  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1264  _M_reset() noexcept
1265  {
1266  _M_rehash_policy._M_reset();
1267  _M_bucket_count = 1;
1268  _M_single_bucket = nullptr;
1269  _M_buckets = &_M_single_bucket;
1270  _M_before_begin._M_nxt = nullptr;
1271  _M_element_count = 0;
1272  }
1273 
1274  template<typename _Key, typename _Value, typename _Alloc,
1275  typename _ExtractKey, typename _Equal,
1276  typename _Hash, typename _RangeHash, typename _Unused,
1277  typename _RehashPolicy, typename _Traits>
1278  void
1279  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1280  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1281  _M_move_assign(_Hashtable&& __ht, true_type)
1282  {
1283  if (__builtin_expect(std::__addressof(__ht) == this, false))
1284  return;
1285 
1286  this->_M_deallocate_nodes(_M_begin());
1287  _M_deallocate_buckets();
1289  _M_rehash_policy = __ht._M_rehash_policy;
1290  if (!__ht._M_uses_single_bucket())
1291  _M_buckets = __ht._M_buckets;
1292  else
1293  {
1294  _M_buckets = &_M_single_bucket;
1295  _M_single_bucket = __ht._M_single_bucket;
1296  }
1297 
1298  _M_bucket_count = __ht._M_bucket_count;
1299  _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
1300  _M_element_count = __ht._M_element_count;
1301  std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());
1302 
1303  // Fix bucket containing the _M_before_begin pointer that can't be moved.
1304  _M_update_bbegin();
1305  __ht._M_reset();
1306  }
1307 
1308  template<typename _Key, typename _Value, typename _Alloc,
1309  typename _ExtractKey, typename _Equal,
1310  typename _Hash, typename _RangeHash, typename _Unused,
1311  typename _RehashPolicy, typename _Traits>
1312  void
1313  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1314  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1315  _M_move_assign(_Hashtable&& __ht, false_type)
1316  {
1317  if (__ht._M_node_allocator() == this->_M_node_allocator())
1318  _M_move_assign(std::move(__ht), true_type{});
1319  else
1320  {
1321  // Can't move memory, move elements then.
1322  _M_assign_elements(std::move(__ht));
1323  __ht.clear();
1324  }
1325  }
1326 
1327  template<typename _Key, typename _Value, typename _Alloc,
1328  typename _ExtractKey, typename _Equal,
1329  typename _Hash, typename _RangeHash, typename _Unused,
1330  typename _RehashPolicy, typename _Traits>
1331  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1332  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1333  _Hashtable(const _Hashtable& __ht)
1334  : __hashtable_base(__ht),
1335  __map_base(__ht),
1336  __rehash_base(__ht),
1337  __hashtable_alloc(
1338  __node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
1339  _M_buckets(nullptr),
1340  _M_bucket_count(__ht._M_bucket_count),
1341  _M_element_count(__ht._M_element_count),
1342  _M_rehash_policy(__ht._M_rehash_policy)
1343  {
1344  __alloc_node_gen_t __alloc_node_gen(*this);
1345  _M_assign(__ht, __alloc_node_gen);
1346  }
1347 
1348  template<typename _Key, typename _Value, typename _Alloc,
1349  typename _ExtractKey, typename _Equal,
1350  typename _Hash, typename _RangeHash, typename _Unused,
1351  typename _RehashPolicy, typename _Traits>
1352  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1353  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1354  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1355  true_type /* alloc always equal */)
1358  : __hashtable_base(__ht),
1359  __map_base(__ht),
1360  __rehash_base(__ht),
1361  __hashtable_alloc(std::move(__a)),
1362  _M_buckets(__ht._M_buckets),
1363  _M_bucket_count(__ht._M_bucket_count),
1364  _M_before_begin(__ht._M_before_begin._M_nxt),
1365  _M_element_count(__ht._M_element_count),
1366  _M_rehash_policy(__ht._M_rehash_policy)
1367  {
1368  // Update buckets if __ht is using its single bucket.
1369  if (__ht._M_uses_single_bucket())
1370  {
1371  _M_buckets = &_M_single_bucket;
1372  _M_single_bucket = __ht._M_single_bucket;
1373  }
1374 
1375  // Fix bucket containing the _M_before_begin pointer that can't be moved.
1376  _M_update_bbegin();
1377 
1378  __ht._M_reset();
1379  }
1380 
1381  template<typename _Key, typename _Value, typename _Alloc,
1382  typename _ExtractKey, typename _Equal,
1383  typename _Hash, typename _RangeHash, typename _Unused,
1384  typename _RehashPolicy, typename _Traits>
1385  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1386  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1387  _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
1388  : __hashtable_base(__ht),
1389  __map_base(__ht),
1390  __rehash_base(__ht),
1391  __hashtable_alloc(__node_alloc_type(__a)),
1392  _M_buckets(),
1393  _M_bucket_count(__ht._M_bucket_count),
1394  _M_element_count(__ht._M_element_count),
1395  _M_rehash_policy(__ht._M_rehash_policy)
1396  {
1397  __alloc_node_gen_t __alloc_node_gen(*this);
1398  _M_assign(__ht, __alloc_node_gen);
1399  }
1400 
1401  template<typename _Key, typename _Value, typename _Alloc,
1402  typename _ExtractKey, typename _Equal,
1403  typename _Hash, typename _RangeHash, typename _Unused,
1404  typename _RehashPolicy, typename _Traits>
1405  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1406  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1407  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1408  false_type /* alloc always equal */)
1409  : __hashtable_base(__ht),
1410  __map_base(__ht),
1411  __rehash_base(__ht),
1412  __hashtable_alloc(std::move(__a)),
1413  _M_buckets(nullptr),
1414  _M_bucket_count(__ht._M_bucket_count),
1415  _M_element_count(__ht._M_element_count),
1416  _M_rehash_policy(__ht._M_rehash_policy)
1417  {
1418  if (__ht._M_node_allocator() == this->_M_node_allocator())
1419  {
1420  if (__ht._M_uses_single_bucket())
1421  {
1422  _M_buckets = &_M_single_bucket;
1423  _M_single_bucket = __ht._M_single_bucket;
1424  }
1425  else
1426  _M_buckets = __ht._M_buckets;
1427 
1428  // Fix bucket containing the _M_before_begin pointer that can't be
1429  // moved.
1430  _M_update_bbegin(__ht._M_begin());
1431 
1432  __ht._M_reset();
1433  }
1434  else
1435  {
1436  __alloc_node_gen_t __alloc_gen(*this);
1437 
1438  using _Fwd_Ht = typename
1439  conditional<__move_if_noexcept_cond<value_type>::value,
1440  const _Hashtable&, _Hashtable&&>::type;
1441  _M_assign(std::forward<_Fwd_Ht>(__ht), __alloc_gen);
1442  __ht.clear();
1443  }
1444  }
1445 
1446  template<typename _Key, typename _Value, typename _Alloc,
1447  typename _ExtractKey, typename _Equal,
1448  typename _Hash, typename _RangeHash, typename _Unused,
1449  typename _RehashPolicy, typename _Traits>
1450  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1451  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1452  ~_Hashtable() noexcept
1453  {
1454  clear();
1455  _M_deallocate_buckets();
1456  }
1457 
1458  template<typename _Key, typename _Value, typename _Alloc,
1459  typename _ExtractKey, typename _Equal,
1460  typename _Hash, typename _RangeHash, typename _Unused,
1461  typename _RehashPolicy, typename _Traits>
1462  void
1463  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1464  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1465  swap(_Hashtable& __x)
1466  noexcept(__and_<__is_nothrow_swappable<_Hash>,
1467  __is_nothrow_swappable<_Equal>>::value)
1468  {
1469  // The only base class with member variables is hash_code_base.
1470  // We define _Hash_code_base::_M_swap because different
1471  // specializations have different members.
1472  this->_M_swap(__x);
1473 
1474  std::__alloc_on_swap(this->_M_node_allocator(), __x._M_node_allocator());
1475  std::swap(_M_rehash_policy, __x._M_rehash_policy);
1476 
1477  // Deal properly with potentially moved instances.
1478  if (this->_M_uses_single_bucket())
1479  {
1480  if (!__x._M_uses_single_bucket())
1481  {
1482  _M_buckets = __x._M_buckets;
1483  __x._M_buckets = &__x._M_single_bucket;
1484  }
1485  }
1486  else if (__x._M_uses_single_bucket())
1487  {
1488  __x._M_buckets = _M_buckets;
1489  _M_buckets = &_M_single_bucket;
1490  }
1491  else
1492  std::swap(_M_buckets, __x._M_buckets);
1493 
1494  std::swap(_M_bucket_count, __x._M_bucket_count);
1495  std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
1496  std::swap(_M_element_count, __x._M_element_count);
1497  std::swap(_M_single_bucket, __x._M_single_bucket);
1498 
1499  // Fix buckets containing the _M_before_begin pointers that can't be
1500  // swapped.
1501  _M_update_bbegin();
1502  __x._M_update_bbegin();
1503  }
1504 
1505  template<typename _Key, typename _Value, typename _Alloc,
1506  typename _ExtractKey, typename _Equal,
1507  typename _Hash, typename _RangeHash, typename _Unused,
1508  typename _RehashPolicy, typename _Traits>
1509  auto
1510  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1511  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1512  find(const key_type& __k)
1513  -> iterator
1514  {
1515  __hash_code __code = this->_M_hash_code(__k);
1516  std::size_t __bkt = _M_bucket_index(__code);
1517  return iterator(_M_find_node(__bkt, __k, __code));
1518  }
1519 
1520  template<typename _Key, typename _Value, typename _Alloc,
1521  typename _ExtractKey, typename _Equal,
1522  typename _Hash, typename _RangeHash, typename _Unused,
1523  typename _RehashPolicy, typename _Traits>
1524  auto
1525  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1526  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1527  find(const key_type& __k) const
1528  -> const_iterator
1529  {
1530  __hash_code __code = this->_M_hash_code(__k);
1531  std::size_t __bkt = _M_bucket_index(__code);
1532  return const_iterator(_M_find_node(__bkt, __k, __code));
1533  }
1534 
1535  template<typename _Key, typename _Value, typename _Alloc,
1536  typename _ExtractKey, typename _Equal,
1537  typename _Hash, typename _RangeHash, typename _Unused,
1538  typename _RehashPolicy, typename _Traits>
1539  auto
1540  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1541  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1542  count(const key_type& __k) const
1543  -> size_type
1544  {
1545  auto __it = find(__k);
1546  if (!__it._M_cur)
1547  return 0;
1548 
1549  if (__unique_keys::value)
1550  return 1;
1551 
1552  // All equivalent values are next to each other, if we find a
1553  // non-equivalent value after an equivalent one it means that we won't
1554  // find any new equivalent value.
1555  size_type __result = 1;
1556  for (auto __ref = __it++;
1557  __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
1558  ++__it)
1559  ++__result;
1560 
1561  return __result;
1562  }
1563 
1564  template<typename _Key, typename _Value, typename _Alloc,
1565  typename _ExtractKey, typename _Equal,
1566  typename _Hash, typename _RangeHash, typename _Unused,
1567  typename _RehashPolicy, typename _Traits>
1568  auto
1569  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1570  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1571  equal_range(const key_type& __k)
1572  -> pair<iterator, iterator>
1573  {
1574  auto __ite = find(__k);
1575  if (!__ite._M_cur)
1576  return { __ite, __ite };
1577 
1578  auto __beg = __ite++;
1579  if (__unique_keys::value)
1580  return { __beg, __ite };
1581 
1582  // All equivalent values are next to each other, if we find a
1583  // non-equivalent value after an equivalent one it means that we won't
1584  // find any new equivalent value.
1585  while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1586  ++__ite;
1587 
1588  return { __beg, __ite };
1589  }
1590 
1591  template<typename _Key, typename _Value, typename _Alloc,
1592  typename _ExtractKey, typename _Equal,
1593  typename _Hash, typename _RangeHash, typename _Unused,
1594  typename _RehashPolicy, typename _Traits>
1595  auto
1596  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1597  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1598  equal_range(const key_type& __k) const
1599  -> pair<const_iterator, const_iterator>
1600  {
1601  auto __ite = find(__k);
1602  if (!__ite._M_cur)
1603  return { __ite, __ite };
1604 
1605  auto __beg = __ite++;
1606  if (__unique_keys::value)
1607  return { __beg, __ite };
1608 
1609  // All equivalent values are next to each other, if we find a
1610  // non-equivalent value after an equivalent one it means that we won't
1611  // find any new equivalent value.
1612  while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1613  ++__ite;
1614 
1615  return { __beg, __ite };
1616  }
1617 
1618  // Find the node before the one whose key compares equal to k in the bucket
1619  // bkt. Return nullptr if no node is found.
1620  template<typename _Key, typename _Value, typename _Alloc,
1621  typename _ExtractKey, typename _Equal,
1622  typename _Hash, typename _RangeHash, typename _Unused,
1623  typename _RehashPolicy, typename _Traits>
1624  auto
1625  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1626  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1627  _M_find_before_node(size_type __bkt, const key_type& __k,
1628  __hash_code __code) const
1629  -> __node_base_ptr
1630  {
1631  __node_base_ptr __prev_p = _M_buckets[__bkt];
1632  if (!__prev_p)
1633  return nullptr;
1634 
1635  for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
1636  __p = __p->_M_next())
1637  {
1638  if (this->_M_equals(__k, __code, *__p))
1639  return __prev_p;
1640 
1641  if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
1642  break;
1643  __prev_p = __p;
1644  }
1645 
1646  return nullptr;
1647  }
1648 
1649  template<typename _Key, typename _Value, typename _Alloc,
1650  typename _ExtractKey, typename _Equal,
1651  typename _Hash, typename _RangeHash, typename _Unused,
1652  typename _RehashPolicy, typename _Traits>
1653  void
1654  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1655  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1656  _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
1657  {
1658  if (_M_buckets[__bkt])
1659  {
1660  // Bucket is not empty, we just need to insert the new node
1661  // after the bucket before begin.
1662  __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
1663  _M_buckets[__bkt]->_M_nxt = __node;
1664  }
1665  else
1666  {
1667  // The bucket is empty, the new node is inserted at the
1668  // beginning of the singly-linked list and the bucket will
1669  // contain _M_before_begin pointer.
1670  __node->_M_nxt = _M_before_begin._M_nxt;
1671  _M_before_begin._M_nxt = __node;
1672 
1673  if (__node->_M_nxt)
1674  // We must update former begin bucket that is pointing to
1675  // _M_before_begin.
1676  _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;
1677 
1678  _M_buckets[__bkt] = &_M_before_begin;
1679  }
1680  }
1681 
1682  template<typename _Key, typename _Value, typename _Alloc,
1683  typename _ExtractKey, typename _Equal,
1684  typename _Hash, typename _RangeHash, typename _Unused,
1685  typename _RehashPolicy, typename _Traits>
1686  void
1687  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1688  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1689  _M_remove_bucket_begin(size_type __bkt, __node_ptr __next,
1690  size_type __next_bkt)
1691  {
1692  if (!__next || __next_bkt != __bkt)
1693  {
1694  // Bucket is now empty
1695  // First update next bucket if any
1696  if (__next)
1697  _M_buckets[__next_bkt] = _M_buckets[__bkt];
1698 
1699  // Second update before begin node if necessary
1700  if (&_M_before_begin == _M_buckets[__bkt])
1701  _M_before_begin._M_nxt = __next;
1702  _M_buckets[__bkt] = nullptr;
1703  }
1704  }
1705 
1706  template<typename _Key, typename _Value, typename _Alloc,
1707  typename _ExtractKey, typename _Equal,
1708  typename _Hash, typename _RangeHash, typename _Unused,
1709  typename _RehashPolicy, typename _Traits>
1710  auto
1711  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1712  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1713  _M_get_previous_node(size_type __bkt, __node_ptr __n)
1714  -> __node_base_ptr
1715  {
1716  __node_base_ptr __prev_n = _M_buckets[__bkt];
1717  while (__prev_n->_M_nxt != __n)
1718  __prev_n = __prev_n->_M_nxt;
1719  return __prev_n;
1720  }
1721 
1722  template<typename _Key, typename _Value, typename _Alloc,
1723  typename _ExtractKey, typename _Equal,
1724  typename _Hash, typename _RangeHash, typename _Unused,
1725  typename _RehashPolicy, typename _Traits>
1726  template<typename... _Args>
1727  auto
1728  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1729  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1730  _M_emplace(true_type /* __uks */, _Args&&... __args)
1731  -> pair<iterator, bool>
1732  {
1733  // First build the node to get access to the hash code
1734  _Scoped_node __node { this, std::forward<_Args>(__args)... };
1735  const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
1736  __hash_code __code = this->_M_hash_code(__k);
1737  size_type __bkt = _M_bucket_index(__code);
1738  if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
1739  // There is already an equivalent node, no insertion
1740  return std::make_pair(iterator(__p), false);
1741 
1742  // Insert the node
1743  auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
1744  __node._M_node = nullptr;
1745  return { __pos, true };
1746  }
1747 
1748  template<typename _Key, typename _Value, typename _Alloc,
1749  typename _ExtractKey, typename _Equal,
1750  typename _Hash, typename _RangeHash, typename _Unused,
1751  typename _RehashPolicy, typename _Traits>
1752  template<typename... _Args>
1753  auto
1754  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1755  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1756  _M_emplace(const_iterator __hint, false_type /* __uks */,
1757  _Args&&... __args)
1758  -> iterator
1759  {
1760  // First build the node to get its hash code.
1761  _Scoped_node __node { this, std::forward<_Args>(__args)... };
1762  const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
1763 
1764  __hash_code __code = this->_M_hash_code(__k);
1765  auto __pos
1766  = _M_insert_multi_node(__hint._M_cur, __code, __node._M_node);
1767  __node._M_node = nullptr;
1768  return __pos;
1769  }
1770 
1771  template<typename _Key, typename _Value, typename _Alloc,
1772  typename _ExtractKey, typename _Equal,
1773  typename _Hash, typename _RangeHash, typename _Unused,
1774  typename _RehashPolicy, typename _Traits>
1775  auto
1776  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1777  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1778  _M_insert_unique_node(size_type __bkt, __hash_code __code,
1779  __node_ptr __node, size_type __n_elt)
1780  -> iterator
1781  {
1782  const __rehash_state& __saved_state = _M_rehash_policy._M_state();
1783  std::pair<bool, std::size_t> __do_rehash
1784  = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
1785  __n_elt);
1786 
1787  if (__do_rehash.first)
1788  {
1789  _M_rehash(__do_rehash.second, __saved_state);
1790  __bkt = _M_bucket_index(__code);
1791  }
1792 
1793  this->_M_store_code(*__node, __code);
1794 
1795  // Always insert at the beginning of the bucket.
1796  _M_insert_bucket_begin(__bkt, __node);
1797  ++_M_element_count;
1798  return iterator(__node);
1799  }
1800 
1801  template<typename _Key, typename _Value, typename _Alloc,
1802  typename _ExtractKey, typename _Equal,
1803  typename _Hash, typename _RangeHash, typename _Unused,
1804  typename _RehashPolicy, typename _Traits>
1805  auto
1806  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1807  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1808  _M_insert_multi_node(__node_ptr __hint,
1809  __hash_code __code, __node_ptr __node)
1810  -> iterator
1811  {
1812  const __rehash_state& __saved_state = _M_rehash_policy._M_state();
1813  std::pair<bool, std::size_t> __do_rehash
1814  = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);
1815 
1816  if (__do_rehash.first)
1817  _M_rehash(__do_rehash.second, __saved_state);
1818 
1819  this->_M_store_code(*__node, __code);
1820  const key_type& __k = _ExtractKey{}(__node->_M_v());
1821  size_type __bkt = _M_bucket_index(__code);
1822 
1823  // Find the node before an equivalent one or use hint if it exists and
1824  // if it is equivalent.
1825  __node_base_ptr __prev
1826  = __builtin_expect(__hint != nullptr, false)
1827  && this->_M_equals(__k, __code, *__hint)
1828  ? __hint
1829  : _M_find_before_node(__bkt, __k, __code);
1830 
1831  if (__prev)
1832  {
1833  // Insert after the node before the equivalent one.
1834  __node->_M_nxt = __prev->_M_nxt;
1835  __prev->_M_nxt = __node;
1836  if (__builtin_expect(__prev == __hint, false))
1837  // hint might be the last bucket node, in this case we need to
1838  // update next bucket.
1839  if (__node->_M_nxt
1840  && !this->_M_equals(__k, __code, *__node->_M_next()))
1841  {
1842  size_type __next_bkt = _M_bucket_index(*__node->_M_next());
1843  if (__next_bkt != __bkt)
1844  _M_buckets[__next_bkt] = __node;
1845  }
1846  }
1847  else
1848  // The inserted node has no equivalent in the hashtable. We must
1849  // insert the new node at the beginning of the bucket to preserve
1850  // equivalent elements' relative positions.
1851  _M_insert_bucket_begin(__bkt, __node);
1852  ++_M_element_count;
1853  return iterator(__node);
1854  }
1855 
1856  // Insert v if no element with its key is already present.
1857  template<typename _Key, typename _Value, typename _Alloc,
1858  typename _ExtractKey, typename _Equal,
1859  typename _Hash, typename _RangeHash, typename _Unused,
1860  typename _RehashPolicy, typename _Traits>
1861  template<typename _Arg, typename _NodeGenerator>
1862  auto
1863  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1864  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1865  _M_insert(_Arg&& __v, const _NodeGenerator& __node_gen,
1866  true_type /* __uks */)
1867  -> pair<iterator, bool>
1868  {
1869  const key_type& __k = _ExtractKey{}(__v);
1870  __hash_code __code = this->_M_hash_code(__k);
1871  size_type __bkt = _M_bucket_index(__code);
1872 
1873  if (__node_ptr __node = _M_find_node(__bkt, __k, __code))
1874  return { iterator(__node), false };
1875 
1876  _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
1877  auto __pos
1878  = _M_insert_unique_node(__bkt, __code, __node._M_node);
1879  __node._M_node = nullptr;
1880  return { __pos, true };
1881  }
1882 
1883  // Insert v unconditionally.
1884  template<typename _Key, typename _Value, typename _Alloc,
1885  typename _ExtractKey, typename _Equal,
1886  typename _Hash, typename _RangeHash, typename _Unused,
1887  typename _RehashPolicy, typename _Traits>
1888  template<typename _Arg, typename _NodeGenerator>
1889  auto
1890  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1891  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1892  _M_insert(const_iterator __hint, _Arg&& __v,
1893  const _NodeGenerator& __node_gen,
1894  false_type /* __uks */)
1895  -> iterator
1896  {
1897  // First compute the hash code so that we don't do anything if it
1898  // throws.
1899  __hash_code __code = this->_M_hash_code(_ExtractKey{}(__v));
1900 
1901  // Second allocate new node so that we don't rehash if it throws.
1902  _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
1903  auto __pos
1904  = _M_insert_multi_node(__hint._M_cur, __code, __node._M_node);
1905  __node._M_node = nullptr;
1906  return __pos;
1907  }
1908 
1909  template<typename _Key, typename _Value, typename _Alloc,
1910  typename _ExtractKey, typename _Equal,
1911  typename _Hash, typename _RangeHash, typename _Unused,
1912  typename _RehashPolicy, typename _Traits>
1913  auto
1914  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1915  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1916  erase(const_iterator __it)
1917  -> iterator
1918  {
1919  __node_ptr __n = __it._M_cur;
1920  std::size_t __bkt = _M_bucket_index(*__n);
1921 
1922  // Look for previous node to unlink it from the erased one, this
1923  // is why we need buckets to contain the before begin to make
1924  // this search fast.
1925  __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
1926  return _M_erase(__bkt, __prev_n, __n);
1927  }
1928 
1929  template<typename _Key, typename _Value, typename _Alloc,
1930  typename _ExtractKey, typename _Equal,
1931  typename _Hash, typename _RangeHash, typename _Unused,
1932  typename _RehashPolicy, typename _Traits>
1933  auto
1934  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1935  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1936  _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
1937  -> iterator
1938  {
1939  if (__prev_n == _M_buckets[__bkt])
1940  _M_remove_bucket_begin(__bkt, __n->_M_next(),
1941  __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
1942  else if (__n->_M_nxt)
1943  {
1944  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
1945  if (__next_bkt != __bkt)
1946  _M_buckets[__next_bkt] = __prev_n;
1947  }
1948 
1949  __prev_n->_M_nxt = __n->_M_nxt;
1950  iterator __result(__n->_M_next());
1951  this->_M_deallocate_node(__n);
1952  --_M_element_count;
1953 
1954  return __result;
1955  }
1956 
1957  template<typename _Key, typename _Value, typename _Alloc,
1958  typename _ExtractKey, typename _Equal,
1959  typename _Hash, typename _RangeHash, typename _Unused,
1960  typename _RehashPolicy, typename _Traits>
1961  auto
1962  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1963  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1964  _M_erase(true_type /* __uks */, const key_type& __k)
1965  -> size_type
1966  {
1967  __hash_code __code = this->_M_hash_code(__k);
1968  std::size_t __bkt = _M_bucket_index(__code);
1969 
1970  // Look for the node before the first matching node.
1971  __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
1972  if (!__prev_n)
1973  return 0;
1974 
1975  // We found a matching node, erase it.
1976  __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
1977  _M_erase(__bkt, __prev_n, __n);
1978  return 1;
1979  }
1980 
1981  template<typename _Key, typename _Value, typename _Alloc,
1982  typename _ExtractKey, typename _Equal,
1983  typename _Hash, typename _RangeHash, typename _Unused,
1984  typename _RehashPolicy, typename _Traits>
1985  auto
1986  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1987  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1988  _M_erase(false_type /* __uks */, const key_type& __k)
1989  -> size_type
1990  {
1991  __hash_code __code = this->_M_hash_code(__k);
1992  std::size_t __bkt = _M_bucket_index(__code);
1993 
1994  // Look for the node before the first matching node.
1995  __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
1996  if (!__prev_n)
1997  return 0;
1998 
1999  // _GLIBCXX_RESOLVE_LIB_DEFECTS
2000  // 526. Is it undefined if a function in the standard changes
2001  // in parameters?
2002  // We use one loop to find all matching nodes and another to deallocate
2003  // them so that the key stays valid during the first loop. It might be
2004  // invalidated indirectly when destroying nodes.
2005  __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2006  __node_ptr __n_last = __n->_M_next();
2007  while (__n_last && this->_M_node_equals(*__n, *__n_last))
2008  __n_last = __n_last->_M_next();
2009 
2010  std::size_t __n_last_bkt = __n_last ? _M_bucket_index(*__n_last) : __bkt;
2011 
2012  // Deallocate nodes.
2013  size_type __result = 0;
2014  do
2015  {
2016  __node_ptr __p = __n->_M_next();
2017  this->_M_deallocate_node(__n);
2018  __n = __p;
2019  ++__result;
2020  }
2021  while (__n != __n_last);
2022 
2023  _M_element_count -= __result;
2024  if (__prev_n == _M_buckets[__bkt])
2025  _M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
2026  else if (__n_last_bkt != __bkt)
2027  _M_buckets[__n_last_bkt] = __prev_n;
2028  __prev_n->_M_nxt = __n_last;
2029  return __result;
2030  }
2031 
2032  template<typename _Key, typename _Value, typename _Alloc,
2033  typename _ExtractKey, typename _Equal,
2034  typename _Hash, typename _RangeHash, typename _Unused,
2035  typename _RehashPolicy, typename _Traits>
2036  auto
2037  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2038  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2039  erase(const_iterator __first, const_iterator __last)
2040  -> iterator
2041  {
2042  __node_ptr __n = __first._M_cur;
2043  __node_ptr __last_n = __last._M_cur;
2044  if (__n == __last_n)
2045  return iterator(__n);
2046 
2047  std::size_t __bkt = _M_bucket_index(*__n);
2048 
2049  __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2050  bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
2051  std::size_t __n_bkt = __bkt;
2052  for (;;)
2053  {
2054  do
2055  {
2056  __node_ptr __tmp = __n;
2057  __n = __n->_M_next();
2058  this->_M_deallocate_node(__tmp);
2059  --_M_element_count;
2060  if (!__n)
2061  break;
2062  __n_bkt = _M_bucket_index(*__n);
2063  }
2064  while (__n != __last_n && __n_bkt == __bkt);
2065  if (__is_bucket_begin)
2066  _M_remove_bucket_begin(__bkt, __n, __n_bkt);
2067  if (__n == __last_n)
2068  break;
2069  __is_bucket_begin = true;
2070  __bkt = __n_bkt;
2071  }
2072 
2073  if (__n && (__n_bkt != __bkt || __is_bucket_begin))
2074  _M_buckets[__n_bkt] = __prev_n;
2075  __prev_n->_M_nxt = __n;
2076  return iterator(__n);
2077  }
2078 
2079  template<typename _Key, typename _Value, typename _Alloc,
2080  typename _ExtractKey, typename _Equal,
2081  typename _Hash, typename _RangeHash, typename _Unused,
2082  typename _RehashPolicy, typename _Traits>
2083  void
2084  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2085  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2086  clear() noexcept
2087  {
2088  this->_M_deallocate_nodes(_M_begin());
2089  __builtin_memset(_M_buckets, 0,
2090  _M_bucket_count * sizeof(__node_base_ptr));
2091  _M_element_count = 0;
2092  _M_before_begin._M_nxt = nullptr;
2093  }
2094 
2095  template<typename _Key, typename _Value, typename _Alloc,
2096  typename _ExtractKey, typename _Equal,
2097  typename _Hash, typename _RangeHash, typename _Unused,
2098  typename _RehashPolicy, typename _Traits>
2099  void
2100  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2101  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2102  rehash(size_type __bkt_count)
2103  {
2104  const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2105  __bkt_count
2106  = std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
2107  __bkt_count);
2108  __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);
2109 
2110  if (__bkt_count != _M_bucket_count)
2111  _M_rehash(__bkt_count, __saved_state);
2112  else
2113  // No rehash, restore previous state to keep it consistent with
2114  // container state.
2115  _M_rehash_policy._M_reset(__saved_state);
2116  }
2117 
2118  template<typename _Key, typename _Value, typename _Alloc,
2119  typename _ExtractKey, typename _Equal,
2120  typename _Hash, typename _RangeHash, typename _Unused,
2121  typename _RehashPolicy, typename _Traits>
2122  void
2123  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2124  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2125  _M_rehash(size_type __bkt_count, const __rehash_state& __state)
2126  {
2127  __try
2128  {
2129  _M_rehash_aux(__bkt_count, __unique_keys{});
2130  }
2131  __catch(...)
2132  {
2133  // A failure here means that buckets allocation failed. We only
2134  // have to restore hash policy previous state.
2135  _M_rehash_policy._M_reset(__state);
2136  __throw_exception_again;
2137  }
2138  }
2139 
2140  // Rehash when there is no equivalent elements.
2141  template<typename _Key, typename _Value, typename _Alloc,
2142  typename _ExtractKey, typename _Equal,
2143  typename _Hash, typename _RangeHash, typename _Unused,
2144  typename _RehashPolicy, typename _Traits>
2145  void
2146  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2147  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2148  _M_rehash_aux(size_type __bkt_count, true_type /* __uks */)
2149  {
2150  __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2151  __node_ptr __p = _M_begin();
2152  _M_before_begin._M_nxt = nullptr;
2153  std::size_t __bbegin_bkt = 0;
2154  while (__p)
2155  {
2156  __node_ptr __next = __p->_M_next();
2157  std::size_t __bkt
2158  = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2159  if (!__new_buckets[__bkt])
2160  {
2161  __p->_M_nxt = _M_before_begin._M_nxt;
2162  _M_before_begin._M_nxt = __p;
2163  __new_buckets[__bkt] = &_M_before_begin;
2164  if (__p->_M_nxt)
2165  __new_buckets[__bbegin_bkt] = __p;
2166  __bbegin_bkt = __bkt;
2167  }
2168  else
2169  {
2170  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2171  __new_buckets[__bkt]->_M_nxt = __p;
2172  }
2173 
2174  __p = __next;
2175  }
2176 
2177  _M_deallocate_buckets();
2178  _M_bucket_count = __bkt_count;
2179  _M_buckets = __new_buckets;
2180  }
2181 
2182  // Rehash when there can be equivalent elements, preserve their relative
2183  // order.
2184  template<typename _Key, typename _Value, typename _Alloc,
2185  typename _ExtractKey, typename _Equal,
2186  typename _Hash, typename _RangeHash, typename _Unused,
2187  typename _RehashPolicy, typename _Traits>
2188  void
2189  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2190  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2191  _M_rehash_aux(size_type __bkt_count, false_type /* __uks */)
2192  {
2193  __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2194  __node_ptr __p = _M_begin();
2195  _M_before_begin._M_nxt = nullptr;
2196  std::size_t __bbegin_bkt = 0;
2197  std::size_t __prev_bkt = 0;
2198  __node_ptr __prev_p = nullptr;
2199  bool __check_bucket = false;
2200 
2201  while (__p)
2202  {
2203  __node_ptr __next = __p->_M_next();
2204  std::size_t __bkt
2205  = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2206 
2207  if (__prev_p && __prev_bkt == __bkt)
2208  {
2209  // Previous insert was already in this bucket, we insert after
2210  // the previously inserted one to preserve equivalent elements
2211  // relative order.
2212  __p->_M_nxt = __prev_p->_M_nxt;
2213  __prev_p->_M_nxt = __p;
2214 
2215  // Inserting after a node in a bucket require to check that we
2216  // haven't change the bucket last node, in this case next
2217  // bucket containing its before begin node must be updated. We
2218  // schedule a check as soon as we move out of the sequence of
2219  // equivalent nodes to limit the number of checks.
2220  __check_bucket = true;
2221  }
2222  else
2223  {
2224  if (__check_bucket)
2225  {
2226  // Check if we shall update the next bucket because of
2227  // insertions into __prev_bkt bucket.
2228  if (__prev_p->_M_nxt)
2229  {
2230  std::size_t __next_bkt
2231  = __hash_code_base::_M_bucket_index(
2232  *__prev_p->_M_next(), __bkt_count);
2233  if (__next_bkt != __prev_bkt)
2234  __new_buckets[__next_bkt] = __prev_p;
2235  }
2236  __check_bucket = false;
2237  }
2238 
2239  if (!__new_buckets[__bkt])
2240  {
2241  __p->_M_nxt = _M_before_begin._M_nxt;
2242  _M_before_begin._M_nxt = __p;
2243  __new_buckets[__bkt] = &_M_before_begin;
2244  if (__p->_M_nxt)
2245  __new_buckets[__bbegin_bkt] = __p;
2246  __bbegin_bkt = __bkt;
2247  }
2248  else
2249  {
2250  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2251  __new_buckets[__bkt]->_M_nxt = __p;
2252  }
2253  }
2254  __prev_p = __p;
2255  __prev_bkt = __bkt;
2256  __p = __next;
2257  }
2258 
2259  if (__check_bucket && __prev_p->_M_nxt)
2260  {
2261  std::size_t __next_bkt
2262  = __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
2263  __bkt_count);
2264  if (__next_bkt != __prev_bkt)
2265  __new_buckets[__next_bkt] = __prev_p;
2266  }
2267 
2268  _M_deallocate_buckets();
2269  _M_bucket_count = __bkt_count;
2270  _M_buckets = __new_buckets;
2271  }
2272 
2273 #if __cplusplus > 201402L
2274  template<typename, typename, typename> class _Hash_merge_helper { };
2275 #endif // C++17
2276 
2277 #if __cpp_deduction_guides >= 201606
2278  // Used to constrain deduction guides
2279  template<typename _Hash>
2280  using _RequireNotAllocatorOrIntegral
2281  = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
2282 #endif
2283 
2284 _GLIBCXX_END_NAMESPACE_VERSION
2285 } // namespace std
2286 
2287 #endif // _HASHTABLE_H
std::_Hashtable::_M_reinsert_node_multi
iterator _M_reinsert_node_multi(const_iterator __hint, node_type &&__nh)
Re-insert an extracted node into a container with equivalent keys.
Definition: bits/hashtable.h:912
__gnu_cxx::__alloc_traits
Uniform interface to C++98 and C++11 allocators.
Definition: ext/alloc_traits.h:52
std::conditional
Define a member typedef type to one of two argument types.
Definition: type_traits:2161
std::_Hashtable::_M_reinsert_node
insert_return_type _M_reinsert_node(node_type &&__nh)
Re-insert an extracted node into a container with unique keys.
Definition: bits/hashtable.h:881
std::is_nothrow_default_constructible
is_nothrow_default_constructible
Definition: type_traits:986
node_handle.h
std::__detail::_Hash_node_base
Definition: hashtable_policy.h:215
std
ISO C++ entities toplevel namespace is std.
std::__detail::_Rehash_base
Definition: hashtable_policy.h:1065
std::shared_mutex::swap
void swap(shared_lock< _Mutex > &__x, shared_lock< _Mutex > &__y) noexcept
Swap specialization for shared_lock.
Definition: shared_mutex:851
std::__detail::_Hashtable_alloc
Definition: hashtable_policy.h:1780
std::is_nothrow_copy_constructible
is_nothrow_copy_constructible
Definition: type_traits:1008
std::distance
constexpr iterator_traits< _InputIterator >::difference_type distance(_InputIterator __first, _InputIterator __last)
A generalization of pointer arithmetic.
Definition: stl_iterator_base_funcs.h:138
std::pair::first
_T1 first
The first member.
Definition: stl_pair.h:217
std::true_type
integral_constant< bool, true > true_type
The type used as a compile-time boolean with true value.
Definition: type_traits:75
std::_Hashtable::extract
node_type extract(const _Key &__k)
Extract a node.
Definition: bits/hashtable.h:960
std::_Node_insert_return
Return type of insert(node_handle&&) on unique maps/sets.
Definition: node_handle.h:364
std::integral_constant
integral_constant
Definition: type_traits:58
std::pair
Struct holding two objects of arbitrary type.
Definition: stl_pair.h:213
std::__addressof
constexpr _Tp * __addressof(_Tp &__r) noexcept
Same as C++11 std::addressof.
Definition: move.h:49
std::__detail::_Local_iterator
local iterators
Definition: hashtable_policy.h:1431
std::cend
constexpr auto cend(const _Container &__cont) noexcept(noexcept(std::end(__cont))) -> decltype(std::end(__cont))
Return an iterator pointing to one past the last element of the const container.
Definition: range_access.h:129
std::size
constexpr auto size(const _Container &__cont) noexcept(noexcept(__cont.size())) -> decltype(__cont.size())
Return the size of a container.
Definition: range_access.h:244
std::max
constexpr const _Tp & max(const _Tp &, const _Tp &)
This does what you think it does.
Definition: stl_algobase.h:254
std::is_nothrow_move_assignable
is_nothrow_move_assignable
Definition: type_traits:1135
std::cbegin
constexpr auto cbegin(const _Container &__cont) noexcept(noexcept(std::begin(__cont))) -> decltype(std::begin(__cont))
Return an iterator pointing to the first element of the const container.
Definition: range_access.h:118
std::end
_Tp * end(valarray< _Tp > &__va)
Return an iterator pointing to one past the last element of the valarray.
Definition: valarray:1234
std::move
constexpr std::remove_reference< _Tp >::type && move(_Tp &&__t) noexcept
Convert a value to an rvalue.
Definition: move.h:101
std::__detail::_Insert_base
Definition: hashtable_policy.h:791
operator=
auto_ptr & operator=(auto_ptr &__a)
auto_ptr assignment operator.
Definition: auto_ptr.h:47
std::_Node_handle
Node handle type for maps.
Definition: node_handle.h:222
std::pair::second
_T2 second
The second member.
Definition: stl_pair.h:218
std::empty
constexpr auto empty(const _Container &__cont) noexcept(noexcept(__cont.empty())) -> decltype(__cont.empty())
Return whether a container is empty.
Definition: range_access.h:262
std::swap
void swap(any &__x, any &__y) noexcept
Exchange the states of two any objects.
Definition: any:412
std::initializer_list
initializer_list
Definition: initializer_list:48
std::_Hashtable::_M_merge_unique
void _M_merge_unique(_Compatible_Hashtable &__src) noexcept
Merge from a compatible container into one with unique keys.
Definition: bits/hashtable.h:973
std::__detail::_Equality
Definition: hashtable_policy.h:1643
std::iterator
Common iterator class.
Definition: stl_iterator_base_types.h:128
std::_Hashtable::_M_merge_multi
void _M_merge_multi(_Compatible_Hashtable &__src) noexcept
Merge from a compatible container into one with equivalent keys.
Definition: bits/hashtable.h:1001
std::__detail::_Map_base
Definition: hashtable_policy.h:644
std::_Hashtable
Definition: bits/hashtable.h:187
std::__detail::_Hash_code_base
Definition: hashtable_policy.h:1191
std::__detail::_Local_const_iterator
local const_iterators
Definition: hashtable_policy.h:1487
hashtable_policy.h
std::begin
_Tp * begin(valarray< _Tp > &__va)
Return an iterator pointing to the first element of the valarray.
Definition: valarray:1214
std::__detail::_Insert
Definition: hashtable_policy.h:954
std::__detail::_Hashtable_base
Definition: hashtable_policy.h:1556
std::false_type
integral_constant< bool, false > false_type
The type used as a compile-time boolean with false value.
Definition: type_traits:78
std::is_same
is_same
Definition: type_traits:1360
std::__detail::_Hash_node
Definition: hashtable_policy.h:279