Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
_concurrent_unordered_impl.h
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 /* Container implementations in this header are based on PPL implementations
18  provided by Microsoft. */
19 
20 #ifndef __TBB__concurrent_unordered_impl_H
21 #define __TBB__concurrent_unordered_impl_H
22 #if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H) && !defined(__TBB_concurrent_hash_map_H)
23 #error Do not #include this internal file directly; use public TBB headers instead.
24 #endif
25 
26 #include "../tbb_stddef.h"
27 
28 #include <iterator>
29 #include <utility> // Need std::pair
30 #include <functional> // Need std::equal_to (in ../concurrent_unordered_*.h)
31 #include <string> // For tbb_hasher
32 #include <cstring> // Need std::memset
33 #include __TBB_STD_SWAP_HEADER
34 
35 #include "../atomic.h"
36 #include "../tbb_exception.h"
37 #include "../tbb_allocator.h"
38 
39 #if __TBB_INITIALIZER_LISTS_PRESENT
40  #include <initializer_list>
41 #endif
42 
43 #if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_COPY_DELETION_BROKEN
44  #define __TBB_UNORDERED_NODE_HANDLE_PRESENT 1
45 #endif
46 
47 #include "_allocator_traits.h"
48 #include "_tbb_hash_compare_impl.h"
49 #include "_template_helpers.h"
50 
51 namespace tbb {
52 namespace interface5 {
54 namespace internal {
55 
56 template <typename T, typename Allocator>
57 class split_ordered_list;
58 template <typename Traits>
59 class concurrent_unordered_base;
60 
61 // Forward list iterators (without skipping dummy elements)
62 template<class Solist, typename Value>
63 class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
64 {
65  template <typename T, typename Allocator>
66  friend class split_ordered_list;
67  template <typename Traits>
68  friend class concurrent_unordered_base;
69  template<class M, typename V>
70  friend class flist_iterator;
71 
72  typedef typename Solist::nodeptr_t nodeptr_t;
73 public:
74  typedef typename Solist::value_type value_type;
75  typedef typename Solist::difference_type difference_type;
76  typedef typename Solist::pointer pointer;
77  typedef typename Solist::reference reference;
78 
79  flist_iterator() : my_node_ptr(0) {}
80  flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
81  : my_node_ptr(other.my_node_ptr) {}
82 
83  reference operator*() const { return my_node_ptr->my_element; }
84  pointer operator->() const { return &**this; }
85 
86  flist_iterator& operator++() {
87  my_node_ptr = my_node_ptr->my_next;
88  return *this;
89  }
90 
91  flist_iterator operator++(int) {
92  flist_iterator tmp = *this;
93  ++*this;
94  return tmp;
95  }
96 
97 protected:
98  flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
99  nodeptr_t get_node_ptr() const { return my_node_ptr; }
100 
101  nodeptr_t my_node_ptr;
102 
103  template<typename M, typename T, typename U>
104  friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
105  template<typename M, typename T, typename U>
106  friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
107 };
108 
109 template<typename Solist, typename T, typename U>
110 bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
111  return i.my_node_ptr == j.my_node_ptr;
112 }
113 template<typename Solist, typename T, typename U>
114 bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
115  return i.my_node_ptr != j.my_node_ptr;
116 }
117 
118 // Split-order list iterators, needed to skip dummy elements
119 template<class Solist, typename Value>
120 class solist_iterator : public flist_iterator<Solist, Value>
121 {
122  typedef flist_iterator<Solist, Value> base_type;
123  typedef typename Solist::nodeptr_t nodeptr_t;
125  template <typename T, typename Allocator>
126  friend class split_ordered_list;
127  template<class M, typename V>
128  friend class solist_iterator;
129  template <typename Traits>
130  friend class concurrent_unordered_base;
131  template<typename M, typename T, typename U>
132  friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
133  template<typename M, typename T, typename U>
134  friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
135 
136  const Solist *my_list_ptr;
137  solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}
138 
139 public:
140  typedef typename Solist::value_type value_type;
141  typedef typename Solist::difference_type difference_type;
142  typedef typename Solist::pointer pointer;
143  typedef typename Solist::reference reference;
144 
145  solist_iterator() {}
146  solist_iterator(const solist_iterator<Solist, typename Solist::value_type> &other)
147  : base_type(other), my_list_ptr(other.my_list_ptr) {}
148 
149  reference operator*() const {
150  return this->base_type::operator*();
151  }
152 
153  pointer operator->() const {
154  return (&**this);
155  }
156 
157  solist_iterator& operator++() {
158  do ++(*(base_type *)this);
159  while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
160 
161  return (*this);
162  }
163 
164  solist_iterator operator++(int) {
165  solist_iterator tmp = *this;
166  do ++*this;
167  while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
168 
169  return (tmp);
170  }
171 };
172 
173 template<typename Solist, typename T, typename U>
174 bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
175  return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
176 }
177 template<typename Solist, typename T, typename U>
178 bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
179  return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
180 }
181 
182 // Forward type and class definitions
183 typedef size_t sokey_t;
184 
185 
186 // Forward list in which elements are sorted in a split-order
187 template <typename T, typename Allocator>
188 class split_ordered_list
189 {
190 public:
192 
194 
195  struct node;
196  typedef node *nodeptr_t;
197 
203  // No support for reference/const_reference in allocator traits
205  typedef const value_type& const_reference;
206 
211 
212  // Node that holds the element in a split-ordered list
213  struct node : tbb::internal::no_assign
214  {
215  private:
216  // for compilers that try to generate default constructors though they are not needed.
217  node(); // VS 2008, 2010, 2012
218  public:
219  // Initialize the node with the given order key
220  void init(sokey_t order_key) {
221  my_order_key = order_key;
222  my_next = NULL;
223  }
224 
225  // Return the order key (needed for hashing)
226  sokey_t get_order_key() const { // TODO: remove
227  return my_order_key;
228  }
229 
230  // Inserts the new element in the list in an atomic fashion
231  nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
232  {
233  // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
234  nodeptr_t exchange_node = tbb::internal::as_atomic(my_next).compare_and_swap(new_node, current_node);
235 
236  if (exchange_node == current_node) // TODO: why this branch?
237  {
238  // Operation succeeded, return the new node
239  return new_node;
240  }
241  else
242  {
243  // Operation failed, return the "interfering" node
244  return exchange_node;
245  }
246  }
247 
248  // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
249  // in the hash table to quickly index into the right subsection of the split-ordered list.
250  bool is_dummy() const {
251  return (my_order_key & 0x1) == 0;
252  }
253 
254 
255  nodeptr_t my_next; // Next element in the list
256  value_type my_element; // Element storage
257  sokey_t my_order_key; // Order key for this element
258  };
259 
260  // Allocate a new node with the given order key; used to allocate dummy nodes
261  nodeptr_t create_node(sokey_t order_key) {
262  nodeptr_t pnode = my_node_allocator.allocate(1);
263  pnode->init(order_key);
264  return (pnode);
265  }
266 
267  // Allocate a new node with the given order key and value
268  template<typename Arg>
269  nodeptr_t create_node(sokey_t order_key, __TBB_FORWARDING_REF(Arg) t,
270  /*AllowCreate=*/tbb::internal::true_type = tbb::internal::true_type()) {
271  nodeptr_t pnode = my_node_allocator.allocate(1);
272 
273  //TODO: use RAII scoped guard instead of explicit catch
274  __TBB_TRY {
275  new(static_cast<void*>(&pnode->my_element)) T(tbb::internal::forward<Arg>(t));
276  pnode->init(order_key);
277  } __TBB_CATCH(...) {
278  my_node_allocator.deallocate(pnode, 1);
279  __TBB_RETHROW();
280  }
281 
282  return (pnode);
283  }
284 
285  // A helper to avoid excessive requirements in internal_insert
286  template<typename Arg>
287  nodeptr_t create_node(sokey_t, __TBB_FORWARDING_REF(Arg),
288  /*AllowCreate=*/tbb::internal::false_type){
289  __TBB_ASSERT(false, "This compile-time helper should never get called");
290  return nodeptr_t();
291  }
292 
293  // Allocate a new node with the given parameters for constructing value
294  template<typename __TBB_PARAMETER_PACK Args>
295  nodeptr_t create_node_v(__TBB_FORWARDING_REF(Args) __TBB_PARAMETER_PACK args) {
296  nodeptr_t pnode = my_node_allocator.allocate(1);
297 
298  //TODO: use RAII scoped guard instead of explicit catch
299  __TBB_TRY {
300  new(static_cast<void*>(&pnode->my_element)) T(__TBB_PACK_EXPANSION(tbb::internal::forward<Args>(args)));
301  } __TBB_CATCH(...) {
302  my_node_allocator.deallocate(pnode, 1);
303  __TBB_RETHROW();
304  }
305 
306  return (pnode);
307  }
308 
309  split_ordered_list(allocator_type a = allocator_type())
310  : my_node_allocator(a), my_element_count(0)
311  {
312  // Immediately allocate a dummy node with order key of 0. This node
313  // will always be the head of the list.
314  my_head = create_node(0);
315  }
316 
318  {
319  // Clear the list
320  clear();
321 
322  // Remove the head element which is not cleared by clear()
323  nodeptr_t pnode = my_head;
324  my_head = NULL;
325 
326  __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");
327 
328  destroy_node(pnode);
329  }
330 
331  // Common forward list functions
332 
334  return (my_node_allocator);
335  }
336 
337  void clear() {
338  nodeptr_t pnext;
339  nodeptr_t pnode = my_head;
340 
341  __TBB_ASSERT(my_head != NULL, "Invalid head list node");
342  pnext = pnode->my_next;
343  pnode->my_next = NULL;
344  pnode = pnext;
345 
346  while (pnode != NULL)
347  {
348  pnext = pnode->my_next;
349  destroy_node(pnode);
350  pnode = pnext;
351  }
352 
353  my_element_count = 0;
354  }
355 
356  // Returns the first non-dummy element in the SOL
357  iterator begin() {
358  return first_real_iterator(raw_begin());
359  }
360 
361  // Returns the first non-dummy element in the SOL
362  const_iterator begin() const {
363  return first_real_iterator(raw_begin());
364  }
365 
367  return (iterator(0, this));
368  }
369 
370  const_iterator end() const {
371  return (const_iterator(0, this));
372  }
373 
375  return (((const self_type *)this)->begin());
376  }
377 
379  return (((const self_type *)this)->end());
380  }
381 
382  // Checks if the number of elements (non-dummy) is 0
383  bool empty() const {
384  return (my_element_count == 0);
385  }
386 
387  // Returns the number of non-dummy elements in the list
388  size_type size() const {
389  return my_element_count;
390  }
391 
392  // Returns the maximum size of the list, determined by the allocator
393  size_type max_size() const {
394  return my_node_allocator.max_size();
395  }
396 
397  // Swaps 'this' list with the passed in one
398  void swap(self_type& other)
399  {
400  if (this == &other)
401  {
402  // Nothing to do
403  return;
404  }
405 
407  std::swap(my_head, other.my_head);
408  }
409 
410  // Split-order list functions
411 
412  // Returns the first element in the SOL, which is always a dummy
413  raw_iterator raw_begin() {
414  return raw_iterator(my_head);
415  }
416 
417  // Returns the first element in the SOL, which is always a dummy
418  raw_const_iterator raw_begin() const {
419  return raw_const_iterator(my_head);
420  }
421 
423  return raw_iterator(0);
424  }
425 
427  return raw_const_iterator(0);
428  }
429 
431  return it.get_node_ptr()->get_order_key();
432  }
433 
435  if( !it.get_node_ptr() ) return ~sokey_t(0);
436  return it.get_node_ptr()->get_order_key();
437  }
438 
439  // Returns a public iterator version of the internal iterator. Public iterator must not
440  // be a dummy private iterator.
442  __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
443  return iterator(it.get_node_ptr(), this);
444  }
445 
446  // Returns a public iterator version of the internal iterator. Public iterator must not
447  // be a dummy private iterator.
449  __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
450  return const_iterator(it.get_node_ptr(), this);
451  }
452 
453  // Returns a non-const version of the raw_iterator
455  return raw_iterator(it.get_node_ptr());
456  }
457 
458  // Returns a non-const version of the iterator
460  return iterator(it.my_node_ptr, it.my_list_ptr);
461  }
462 
463  // Returns a public iterator version of the first non-dummy internal iterator at or after
464  // the passed in internal iterator.
465  iterator first_real_iterator(raw_iterator it)
466  {
467  // Skip all dummy, internal only iterators
468  while (it != raw_end() && it.get_node_ptr()->is_dummy())
469  ++it;
470 
471  return iterator(it.get_node_ptr(), this);
472  }
473 
474  // Returns a public iterator version of the first non-dummy internal iterator at or after
475  // the passed in internal iterator.
476  const_iterator first_real_iterator(raw_const_iterator it) const
477  {
478  // Skip all dummy, internal only iterators
479  while (it != raw_end() && it.get_node_ptr()->is_dummy())
480  ++it;
481 
482  return const_iterator(it.get_node_ptr(), this);
483  }
484 
485  // Erase an element using the allocator
486  void destroy_node(nodeptr_t pnode) {
487  if (!pnode->is_dummy()) my_node_allocator.destroy(pnode);
488  my_node_allocator.deallocate(pnode, 1);
489  }
490 
491  // Try to insert a new element in the list.
492  // If insert fails, return the node that was inserted instead.
493  static nodeptr_t try_insert_atomic(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
494  new_node->my_next = current_node;
495  return previous->atomic_set_next(new_node, current_node);
496  }
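 // Illustrative caller sketch: the retry pattern that try_insert() and insert_dummy()
 // build on top of try_insert_atomic().
 //
 //   nodeptr_t result = try_insert_atomic(prev_node, new_node, expected_next);
 //   if (result == new_node) {
 //       // success: new_node is now linked immediately after prev_node
 //   } else {
 //       // another thread linked 'result' after prev_node first; rescan from prev_node
 //   }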
497 
498  // Insert a new element between passed in iterators
499  std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, nodeptr_t pnode, size_type *new_count)
500  {
501  nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), pnode, next.get_node_ptr());
502 
503  if (inserted_node == pnode)
504  {
505  // If the insert succeeded, check that the order is correct and increment the element count
506  check_range(it, next);
507  *new_count = tbb::internal::as_atomic(my_element_count).fetch_and_increment();
508  return std::pair<iterator, bool>(iterator(pnode, this), true);
509  }
510  else
511  {
512  return std::pair<iterator, bool>(end(), false);
513  }
514  }
515 
516  // Insert a new dummy element, starting search at a parent dummy element
518  {
520  raw_iterator where = it;
521 
522  __TBB_ASSERT(where != last, "Invalid head node");
523 
524  ++where;
525 
526  // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
527  nodeptr_t dummy_node = create_node(order_key);
528 
529  for (;;)
530  {
531  __TBB_ASSERT(it != last, "Invalid head list node");
532 
533  // If the head iterator is at the end of the list, or past the point where this dummy
534  // node needs to be inserted, then try to insert it.
535  if (where == last || get_order_key(where) > order_key)
536  {
537  __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");
538 
539  // Try to insert it in the right place
540  nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), dummy_node, where.get_node_ptr());
541 
542  if (inserted_node == dummy_node)
543  {
544  // Insertion succeeded, check the list for order violations
545  check_range(it, where);
546  return raw_iterator(dummy_node);
547  }
548  else
549  {
550  // Insertion failed: either dummy node was inserted by another thread, or
551  // a real element was inserted at exactly the same place as dummy node.
552  // Proceed with the search from the previous location where order key was
553  // known to be larger (note: this is legal only because there is no safe
554  // concurrent erase operation supported).
555  where = it;
556  ++where;
557  continue;
558  }
559  }
560  else if (get_order_key(where) == order_key)
561  {
562  // Another dummy node with the same value found, discard the new one.
563  destroy_node(dummy_node);
564  return where;
565  }
566 
567  // Move the iterator forward
568  it = where;
569  ++where;
570  }
571 
572  }
573 
575  nodeptr_t pnode = (where++).get_node_ptr();
576  nodeptr_t prevnode = previous.get_node_ptr();
577  __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
578  prevnode->my_next = pnode->my_next;
579  return pnode;
580  }
581 
582  // This erase function can handle both real and dummy nodes
584  /*allow_destroy*/tbb::internal::true_type)
585  {
586  nodeptr_t pnode = erase_node_impl(previous, where);
587  destroy_node(pnode);
588  }
589 
591  /*allow_destroy*/tbb::internal::false_type)
592  {
593  erase_node_impl(previous, where);
594  }
595 
596  void erase_node(raw_iterator previous, raw_const_iterator& where) {
597  erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type());
598  }
599 
600  // Erase the element (previous node needs to be passed because this is a forward only list)
601  template<typename AllowDestroy>
602  iterator erase_node(raw_iterator previous, const_iterator where, AllowDestroy)
603  {
604  raw_const_iterator it = where;
605  erase_node(previous, it, AllowDestroy());
607 
608  return get_iterator(first_real_iterator(it));
609  }
610 
612  return erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type());
613  }
614 
615 
616 
617  // Move all elements from the passed in split-ordered list to this one
618  void move_all(self_type& source)
619  {
621  raw_const_iterator last = source.raw_end();
622 
623  if (first == last)
624  return;
625 
626  nodeptr_t previous_node = my_head;
627  raw_const_iterator begin_iterator = first++;
628 
629  // Move all elements one by one, including dummy ones
630  for (raw_const_iterator it = first; it != last;)
631  {
632  nodeptr_t pnode = it.get_node_ptr();
633 
634  nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
635  previous_node = try_insert_atomic(previous_node, dummy_node, NULL);
636  __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
637  raw_const_iterator where = it++;
638  source.erase_node(get_iterator(begin_iterator), where);
639  }
640  check_range();
641  }
642 
643 
644 private:
645  // Need to set up private fields of split_ordered_list in move constructor and assignment of concurrent_unordered_base
646  template <typename Traits>
647  friend class concurrent_unordered_base;
648 
649  // Check the list for order violations
650  void check_range( raw_iterator first, raw_iterator last)
651  {
652 #if TBB_USE_ASSERT
653  for (raw_iterator it = first; it != last; ++it)
654  {
655  raw_iterator next = it;
656  ++next;
657 
658  __TBB_ASSERT(next == raw_end() || get_order_key(next) >= get_order_key(it), "!!! List order inconsistency !!!");
659  }
660 #else
661  tbb::internal::suppress_unused_warning(first, last);
662 #endif
663  }
664  void check_range()
665  {
666 #if TBB_USE_ASSERT
667  check_range( raw_begin(), raw_end() );
668 #endif
669  }
670 
671  typename tbb::internal::allocator_rebind<allocator_type, node>::type my_node_allocator; // allocator for list nodes
672  size_type my_element_count; // Total item count, not counting dummy nodes
673  nodeptr_t my_head; // pointer to head node
674 };
675 
676 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
677 #pragma warning(push)
678 #pragma warning(disable: 4127) // warning C4127: conditional expression is constant
679 #endif
680 
681 template <typename Traits>
682 class concurrent_unordered_base : public Traits
683 {
684 protected:
685  // Type definitions
687  typedef typename Traits::value_type value_type;
688  typedef typename Traits::key_type key_type;
689  typedef typename Traits::hash_compare hash_compare;
690  typedef typename Traits::allocator_type allocator_type;
691  typedef typename hash_compare::hasher hasher;
693 
698  // No support for reference/const_reference in allocator
701 
703  typedef typename solist_t::nodeptr_t nodeptr_t;
704  // Iterators that walk the entire split-order list, including dummy nodes
707  typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
711 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
712  typedef typename Traits::node_type node_type;
713 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
714  using Traits::my_hash_compare;
715  using Traits::get_key;
716  using Traits::allow_multimapping;
717 
718  static const size_type initial_bucket_number = 8; // Initial number of buckets
719 
720 private:
721  template<typename OtherTraits>
723 
724  typedef std::pair<iterator, iterator> pairii_t;
725  typedef std::pair<const_iterator, const_iterator> paircc_t;
726 
727  static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit
728  static const size_type initial_bucket_load = 4; // Initial maximum number of elements per bucket
729 
730  struct call_internal_clear_on_exit {
731  concurrent_unordered_base* my_instance;
732  call_internal_clear_on_exit(concurrent_unordered_base* instance) : my_instance(instance) {}
733  void dismiss(){ my_instance = NULL;}
734  ~call_internal_clear_on_exit(){
735  if (my_instance){
736  my_instance->internal_clear();
737  }
738  }
739  };
740 protected:
741  // Constructors/Destructors
743  const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
744  : Traits(hc), my_solist(a),
746  {
747  if( n_of_buckets == 0) ++n_of_buckets;
748  my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2
749  internal_init();
750  }
751 
753  : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
754  {
755  internal_init();
756  internal_copy(right);
757  }
758 
760  : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
761  {
762  //FIXME:exception safety seems to be broken here
763  internal_init();
764  internal_copy(right);
765  }
766 
767 #if __TBB_CPP11_RVALUE_REF_PRESENT
769  : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()),
771  {
773  internal_init();
774  swap(right);
775  }
776 
778  : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
779  {
780  call_internal_clear_on_exit clear_buckets_on_exception(this);
781 
782  internal_init();
783  if (a == right.get_allocator()){
786  this->swap(right);
787  }else{
788  my_maximum_bucket_size = right.my_maximum_bucket_size;
789  my_number_of_buckets = right.my_number_of_buckets;
790  my_solist.my_element_count = right.my_solist.my_element_count;
791 
792  if (! right.my_solist.empty()){
793  nodeptr_t previous_node = my_solist.my_head;
794 
795  // Move all elements one by one, including dummy ones
796  for (raw_const_iterator it = ++(right.my_solist.raw_begin()), last = right.my_solist.raw_end(); it != last; ++it)
797  {
798  const nodeptr_t pnode = it.get_node_ptr();
799  nodeptr_t node;
800  if (pnode->is_dummy()) {
801  node = my_solist.create_node(pnode->get_order_key());
802  size_type bucket = __TBB_ReverseBits(pnode->get_order_key()) % my_number_of_buckets;
803  set_bucket(bucket, node);
804  }else{
805  node = my_solist.create_node(pnode->get_order_key(), std::move(pnode->my_element));
806  }
807 
808  previous_node = my_solist.try_insert_atomic(previous_node, node, NULL);
809  __TBB_ASSERT(previous_node != NULL, "Insertion of node failed. Concurrent inserts in constructor ?");
810  }
812  }
813  }
814 
815  clear_buckets_on_exception.dismiss();
816  }
817 
818 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
819 
821  if (this != &right)
822  internal_copy(right);
823  return (*this);
824  }
825 
826 #if __TBB_CPP11_RVALUE_REF_PRESENT
828  {
829  if(this != &other){
831  if(pocma_t::value || this->my_allocator == other.my_allocator) {
832  concurrent_unordered_base trash (std::move(*this));
833  swap(other);
834  if (pocma_t::value) {
835  using std::swap;
836  //TODO: swapping allocators here may be a problem, replace with single direction moving
837  swap(this->my_solist.my_node_allocator, other.my_solist.my_node_allocator);
838  swap(this->my_allocator, other.my_allocator);
839  }
840  } else {
841  concurrent_unordered_base moved_copy(std::move(other),this->my_allocator);
842  this->swap(moved_copy);
843  }
844  }
845  return *this;
846  }
847 
848 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
849 
850 #if __TBB_INITIALIZER_LISTS_PRESENT
851  concurrent_unordered_base& operator=(std::initializer_list<value_type> il)
853  {
854  this->clear();
855  this->insert(il.begin(),il.end());
856  return (*this);
857  }
858 #endif // __TBB_INITIALIZER_LISTS_PRESENT
859 
860 
862  // Delete all node segments
863  internal_clear();
864  }
865 
866 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
867  template<typename SourceType>
868  void internal_merge(SourceType& source) {
869  typedef typename SourceType::iterator source_iterator;
871  typename SourceType::node_type>::value),
872  "Incompatible containers cannot be merged");
873 
874  for(source_iterator it = source.begin(); it != source.end();) {
875  source_iterator where = it++;
876  if (allow_multimapping || find(get_key(*where)) == end()) {
877  std::pair<node_type, raw_iterator> extract_result = source.internal_extract(where);
878 
879  // If the insertion fails, it returns ownership of the node to extract_result.first
880  // extract_result.first remains valid node handle
881  if (!insert(std::move(extract_result.first)).second) {
882  raw_iterator next = extract_result.second;
883  raw_iterator current = next++;
884 
885  __TBB_ASSERT(extract_result.first.my_node->get_order_key() >= current.get_node_ptr()->get_order_key(),
886  "Wrong nodes order in source container");
887  __TBB_ASSERT(next==source.my_solist.raw_end() ||
888  extract_result.first.my_node->get_order_key() <= next.get_node_ptr()->get_order_key(),
889  "Wrong nodes order in source container");
890 
891  size_t new_count = 0;// To use try_insert()
892  bool insert_result =
893  source.my_solist.try_insert(current, next, extract_result.first.my_node, &new_count).second;
894  __TBB_ASSERT_EX(insert_result, "Return to source must be successful. "
895  "Changing source container while merging is unsafe.");
896  }
897  extract_result.first.deactivate();
898  }
899  }
900  }
901 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
902 
903 public:
905  return my_solist.get_allocator();
906  }
907 
908  // Size and capacity function
909  bool empty() const {
910  return my_solist.empty();
911  }
912 
913  size_type size() const {
914  return my_solist.size();
915  }
916 
917  size_type max_size() const {
918  return my_solist.max_size();
919  }
920 
921  // Iterators
923  return my_solist.begin();
924  }
925 
927  return my_solist.begin();
928  }
929 
931  return my_solist.end();
932  }
933 
934  const_iterator end() const {
935  return my_solist.end();
936  }
937 
939  return my_solist.cbegin();
940  }
941 
943  return my_solist.cend();
944  }
945 
946  // Parallel traversal support
952  public:
959 
961  bool empty() const {return my_begin_node == my_end_node;}
962 
964  bool is_divisible() const {
965  return my_midpoint_node != my_end_node;
966  }
970  {
972  __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
973  __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
974  set_midpoint();
975  r.set_midpoint();
976  }
979  my_table(a_table), my_begin_node(a_table.my_solist.begin()),
980  my_end_node(a_table.my_solist.end())
981  {
982  set_midpoint();
983  }
987  size_type grainsize() const { return 1; }
988 
990  void set_midpoint() const {
991  if( my_begin_node == my_end_node ) // not divisible
993  else {
996  size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
997  while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
998  if(__TBB_ReverseBits(mid_bucket) > begin_key) {
999  // found a dummy_node between begin and end
1001  }
1002  else {
1003  // didn't find a dummy node between begin and end.
1005  }
1006 #if TBB_USE_ASSERT
1007  {
1009  __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
1010  __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
1011  }
1012 #endif // TBB_USE_ASSERT
1013  }
1014  }
1015  };
1016 
1017  class range_type : public const_range_type {
1018  public:
1024 
1027  };
1028 
1029  range_type range() {
1030  return range_type( *this );
1031  }
1032 
1033  const_range_type range() const {
1034  return const_range_type( *this );
1035  }
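 // Illustrative usage sketch (assuming the public tbb::concurrent_unordered_map built on
 // this base class): range() lets tbb::parallel_for split the traversal at bucket dummy nodes.
 //
 //   typedef tbb::concurrent_unordered_map<int, int> table_t;
 //   table_t table;
 //   tbb::parallel_for(table.range(), [](const table_t::range_type& r) {
 //       for (table_t::iterator it = r.begin(); it != r.end(); ++it)
 //           it->second += 1; // reading and updating elements is fine; erasing here is not
 //   });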
1036 
1037  // Modifiers
1038  std::pair<iterator, bool> insert(const value_type& value) {
1039  return internal_insert</*AllowCreate=*/tbb::internal::true_type,
1040  /*AllowDestroy=*/tbb::internal::true_type>(value);
1041  }
1042 
1044  // Ignore hint
1045  return insert(value).first;
1046  }
1047 
1048 #if __TBB_CPP11_RVALUE_REF_PRESENT
1049  std::pair<iterator, bool> insert(value_type&& value) {
1050  return internal_insert</*AllowCreate=*/tbb::internal::true_type,
1051  /*AllowDestroy=*/tbb::internal::true_type>(std::move(value));
1052  }
1053 
1055  // Ignore hint
1056  return insert(std::move(value)).first;
1057  }
1058 #endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/
1059 
1060 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
1061  std::pair<iterator, bool> insert(node_type&& nh) {
1062  if (!nh.empty()) {
1063  nodeptr_t handled_node = nh.my_node;
1064  std::pair<iterator, bool> insert_result =
1066  /*AllowDestroy=*/tbb::internal::false_type>
1067  (handled_node->my_element, handled_node);
1068  if (insert_result.second)
1069  nh.deactivate();
1070  return insert_result;
1071  }
1072  return std::pair<iterator, bool>(end(), false);
1073  }
1074 
1076  return insert(std::move(nh)).first;
1077  }
1078 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
1079 
1080 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT
1081  template<typename... Args>
1082  std::pair<iterator, bool> emplace(Args&&... args) {
1083  nodeptr_t pnode = my_solist.create_node_v(tbb::internal::forward<Args>(args)...);
1084 
1085  return internal_insert</*AllowCreate=*/tbb::internal::false_type,
1086  /*AllowDestroy=*/tbb::internal::true_type>(pnode->my_element, pnode);
1087  }
1088 
1089  template<typename... Args>
1091  // Ignore hint
1092  return emplace(tbb::internal::forward<Args>(args)...).first;
1093  }
1094 #endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT
1095 
1096 
1097  template<class Iterator>
1098  void insert(Iterator first, Iterator last) {
1099  for (Iterator it = first; it != last; ++it)
1100  insert(*it);
1101  }
1102 
1103 #if __TBB_INITIALIZER_LISTS_PRESENT
1104  void insert(std::initializer_list<value_type> il) {
1106  insert(il.begin(), il.end());
1107  }
1108 #endif
1109 
1111  return internal_erase(where);
1112  }
1113 
1115  while (first != last)
1116  unsafe_erase(first++);
1117  return my_solist.get_iterator(first);
1118  }
1119 
1121  pairii_t where = equal_range(key);
1122  size_type item_count = internal_distance(where.first, where.second);
1123  unsafe_erase(where.first, where.second);
1124  return item_count;
1125  }
1126 
1127 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
1129  return internal_extract(where).first;
1130  }
1131 
1133  pairii_t where = equal_range(key);
1134  if (where.first == end()) return node_type(); // element was not found
1135  return internal_extract(where.first).first;
1136  }
1137 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
1138 
1140  if (this != &right) {
1141  std::swap(my_hash_compare, right.my_hash_compare);
1142  my_solist.swap(right.my_solist);
1143  internal_swap_buckets(right);
1146  }
1147  }
1148 
1149  // Observers
1151  return my_hash_compare.my_hash_object;
1152  }
1153 
1154  key_equal key_eq() const {
1155  return my_hash_compare.my_key_compare_object;
1156  }
1157 
1158  void clear() {
1159  // Clear list
1160  my_solist.clear();
1161 
1162  // Clear buckets
1163  internal_clear();
1164 
1165  // Initialize bucket 0
1166  __TBB_ASSERT(my_buckets[0] == NULL, NULL);
1167  raw_iterator dummy_node = my_solist.raw_begin();
1168  set_bucket(0, dummy_node);
1169  }
1170 
1171  // Lookup
1173  return internal_find(key);
1174  }
1175 
1177  return const_cast<self_type*>(this)->internal_find(key);
1178  }
1179 
1180  size_type count(const key_type& key) const {
1181  if(allow_multimapping) {
1182  paircc_t answer = equal_range(key);
1183  size_type item_count = internal_distance(answer.first, answer.second);
1184  return item_count;
1185  } else {
1186  return const_cast<self_type*>(this)->internal_find(key) == end()?0:1;
1187  }
1188  }
1189 
1190  std::pair<iterator, iterator> equal_range(const key_type& key) {
1191  return internal_equal_range(key);
1192  }
1193 
1194  std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
1195  return const_cast<self_type*>(this)->internal_equal_range(key);
1196  }
1197 
1198  // Bucket interface - for debugging
1200  return my_number_of_buckets;
1201  }
1202 
1204  return segment_size(pointers_per_table-1);
1205  }
1206 
1208  size_type item_count = 0;
1209  if (is_initialized(bucket)) {
1210  raw_iterator it = get_bucket(bucket);
1211  ++it;
1212  for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
1213  ++item_count;
1214  }
1215  return item_count;
1216  }
1217 
1219  sokey_t order_key = (sokey_t) my_hash_compare(key);
1220  size_type bucket = order_key % my_number_of_buckets;
1221  return bucket;
1222  }
1223 
1224  // If the bucket is initialized, return a first non-dummy element in it
1226  if (!is_initialized(bucket))
1227  return end();
1228 
1229  raw_iterator it = get_bucket(bucket);
1230  return my_solist.first_real_iterator(it);
1231  }
1232 
1233  // If the bucket is initialized, return a first non-dummy element in it
1235  {
1236  if (!is_initialized(bucket))
1237  return end();
1238 
1239  raw_const_iterator it = get_bucket(bucket);
1240  return my_solist.first_real_iterator(it);
1241  }
1242 
1243  // @REVIEW: Takes O(n)
1244  // Returns the iterator after the last non-dummy element in the bucket
1246  {
1247  if (!is_initialized(bucket))
1248  return end();
1249 
1250  raw_iterator it = get_bucket(bucket);
1251 
1252  // Find the end of the bucket, denoted by the dummy element
1253  do ++it;
1254  while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
1255 
1256  // Return the first real element past the end of the bucket
1257  return my_solist.first_real_iterator(it);
1258  }
1259 
1260  // @REVIEW: Takes O(n)
1261  // Returns the iterator after the last non-dummy element in the bucket
1263  {
1264  if (!is_initialized(bucket))
1265  return end();
1266 
1267  raw_const_iterator it = get_bucket(bucket);
1268 
1269  // Find the end of the bucket, denoted by the dummy element
1270  do ++it;
1271  while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
1272 
1273  // Return the first real element past the end of the bucket
1274  return my_solist.first_real_iterator(it);
1275  }
1276 
1278  return ((const self_type *) this)->unsafe_begin(bucket);
1279  }
1280 
1282  return ((const self_type *) this)->unsafe_end(bucket);
1283  }
1284 
1285  // Hash policy
1286  float load_factor() const {
1287  return (float) size() / (float) unsafe_bucket_count();
1288  }
1289 
1290  float max_load_factor() const {
1291  return my_maximum_bucket_size;
1292  }
1293 
1294  void max_load_factor(float newmax) {
1295  if (newmax != newmax || newmax < 0)
1296  tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
1297  my_maximum_bucket_size = newmax;
1298  }
1299 
1300  // This function is a noop, because the underlying split-ordered list
1301  // is already sorted, so an increase in the bucket number will be
1302  // reflected next time this bucket is touched.
1303  void rehash(size_type buckets) {
1304  size_type current_buckets = my_number_of_buckets;
1305  if (current_buckets >= buckets)
1306  return;
1307  my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2
1308  }
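 // Worked example (illustrative): rehash(100) on a table of 8 buckets computes
 //   100*2-1 = 199,  __TBB_Log2(199) = 7,  size_type(1)<<7 = 128,
 // so my_number_of_buckets becomes 128; no dummy nodes are allocated here, they are
 // created lazily by init_bucket() when a bucket is first referenced.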
1309 
1310 private:
1311 
1312  // Initialize the hash and keep the first bucket open
1313  void internal_init() {
1314  // Initialize the array of segment pointers
1315  memset(my_buckets, 0, sizeof(my_buckets));
1316 
1317  // Initialize bucket 0
1318  raw_iterator dummy_node = my_solist.raw_begin();
1319  set_bucket(0, dummy_node);
1320  }
1321 
1323  for (size_type index = 0; index < pointers_per_table; ++index) {
1324  if (my_buckets[index] != NULL) {
1325  size_type sz = segment_size(index);
1326  for (size_type index2 = 0; index2 < sz; ++index2)
1327  my_allocator.destroy(&my_buckets[index][index2]);
1328  my_allocator.deallocate(my_buckets[index], sz);
1329  my_buckets[index] = 0;
1330  }
1331  }
1332  }
1333 
1334  void internal_copy(const self_type& right) {
1335  clear();
1336 
1339 
1340  __TBB_TRY {
1341  insert(right.begin(), right.end());
1342  my_hash_compare = right.my_hash_compare;
1343  } __TBB_CATCH(...) {
1344  my_solist.clear();
1345  __TBB_RETHROW();
1346  }
1347  }
1348 
1350  {
1351  // Swap all node segments
1352  for (size_type index = 0; index < pointers_per_table; ++index)
1353  {
1354  raw_iterator * iterator_pointer = my_buckets[index];
1355  my_buckets[index] = right.my_buckets[index];
1356  right.my_buckets[index] = iterator_pointer;
1357  }
1358  }
1359 
1360  //TODO: why not use std::distance?
1361  // Hash APIs
1363  {
1364  size_type num = 0;
1365 
1366  for (const_iterator it = first; it != last; ++it)
1367  ++num;
1368 
1369  return num;
1370  }
1371 
1372  // Insert an element in the hash given its value
1373  template<typename AllowCreate, typename AllowDestroy, typename ValueType>
1374  std::pair<iterator, bool> internal_insert(__TBB_FORWARDING_REF(ValueType) value, nodeptr_t pnode = NULL)
1375  {
1376  const key_type *pkey = &get_key(value);
1377  sokey_t hash_key = (sokey_t) my_hash_compare(*pkey);
1378  size_type new_count = 0;
1379  sokey_t order_key = split_order_key_regular(hash_key);
1380  raw_iterator previous = prepare_bucket(hash_key);
1382  __TBB_ASSERT(previous != last, "Invalid head node");
1383 
1384  // First node is a dummy node
1385  for (raw_iterator where = previous;;)
1386  {
1387  ++where;
1388  if (where == last || solist_t::get_order_key(where) > order_key ||
1389  // if multimapped, stop at the first item equal to us.
1390  (allow_multimapping && solist_t::get_order_key(where) == order_key &&
1391  !my_hash_compare(get_key(*where), *pkey))) // TODO: fix negation
1392  {
1393  if (!pnode) {
1394  pnode = my_solist.create_node(order_key, tbb::internal::forward<ValueType>(value), AllowCreate());
1395  // If the value was moved, the known reference to key might be invalid
1396  pkey = &get_key(pnode->my_element);
1397  }
1398  else
1399  {
1400  // Set new order_key to node
1401  pnode->init(order_key);
1402  }
1403 
1404  // Try to insert 'pnode' between 'previous' and 'where'
1405  std::pair<iterator, bool> result = my_solist.try_insert(previous, where, pnode, &new_count);
1406 
1407  if (result.second)
1408  {
1409  // Insertion succeeded, adjust the table size, if needed
1411  return result;
1412  }
1413  else
1414  {
1415  // Insertion failed: either the same node was inserted by another thread, or
1416  // another element was inserted at exactly the same place as this node.
1417  // Proceed with the search from the previous location where order key was
1418  // known to be larger (note: this is legal only because there is no safe
1419  // concurrent erase operation supported).
1420  where = previous;
1421  continue;
1422  }
1423  }
1424  else if (!allow_multimapping && solist_t::get_order_key(where) == order_key &&
1425  !my_hash_compare(get_key(*where), *pkey)) // TODO: fix negation
1426  { // Element already in the list, return it
1427  if (pnode && AllowDestroy::value)
1428  my_solist.destroy_node(pnode);
1429  return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
1430  }
1431  // Move the iterator forward
1432  previous = where;
1433  }
1434  }
1435 
1436  // Find the element in the split-ordered list
1438  {
1439  sokey_t hash_key = (sokey_t) my_hash_compare(key);
1440  sokey_t order_key = split_order_key_regular(hash_key);
1442 
1443  for (raw_iterator it = prepare_bucket(hash_key); it != last; ++it)
1444  {
1445  if (solist_t::get_order_key(it) > order_key)
1446  {
1447  // If the order key is smaller than the current order key, the element
1448  // is not in the hash.
1449  return end();
1450  }
1451  else if (solist_t::get_order_key(it) == order_key)
1452  {
1453  // The fact that order keys match does not mean that the element is found.
1454  // Key function comparison has to be performed to check whether this is the
1455  // right element. If not, keep searching while order key is the same.
1456  if (!my_hash_compare(get_key(*it), key)) // TODO: fix negation
1457  return my_solist.get_iterator(it);
1458  }
1459  }
1460 
1461  return end();
1462  }
1463 
1464  // Erase an element from the list. This is not a concurrency safe function.
1466  {
1467  sokey_t hash_key = (sokey_t) my_hash_compare(get_key(*it));
1468  raw_iterator previous = prepare_bucket(hash_key);
1470  __TBB_ASSERT(previous != last, "Invalid head node");
1471 
1472  // First node is a dummy node
1473  for (raw_iterator where = previous; where != last; previous = where) {
1474  ++where;
1475  if (my_solist.get_iterator(where) == it)
1476  return my_solist.erase_node(previous, it);
1477  }
1478  return end();
1479  }
1480 
1481 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
1482  std::pair<node_type, raw_iterator> internal_extract(const_iterator it) {
1483  sokey_t hash_key = sokey_t(my_hash_compare(get_key(*it)));
1484  raw_iterator previous = prepare_bucket(hash_key);
1486  __TBB_ASSERT(previous != last, "Invalid head node");
1487 
1488  for(raw_iterator where = previous; where != last; previous = where) {
1489  ++where;
1490  if (my_solist.get_iterator(where) == it) {
1491  const_iterator result = it;
1492  my_solist.erase_node(previous, it, /*allow_destroy*/tbb::internal::false_type());
1493  return std::pair<node_type, raw_iterator>( node_type(result.get_node_ptr()),
1494  previous);
1495  }
1496  }
1497  return std::pair<node_type, iterator>(node_type(), end());
1498  }
1499 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
1500 
1501  // Return the [begin, end) pair of iterators with the same key values.
1502  // This operation makes sense only if mapping is many-to-one.
1504  {
1505  sokey_t hash_key = (sokey_t) my_hash_compare(key);
1506  sokey_t order_key = split_order_key_regular(hash_key);
1507  raw_iterator end_it = my_solist.raw_end();
1508 
1509  for (raw_iterator it = prepare_bucket(hash_key); it != end_it; ++it)
1510  {
1511  if (solist_t::get_order_key(it) > order_key)
1512  {
1513  // There is no element with the given key
1514  return pairii_t(end(), end());
1515  }
1516  else if (solist_t::get_order_key(it) == order_key &&
1517  !my_hash_compare(get_key(*it), key)) // TODO: fix negation; also below
1518  {
1520  iterator last = first;
1521  do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) );
1522  return pairii_t(first, last);
1523  }
1524  }
1525 
1526  return pairii_t(end(), end());
1527  }
1528 
1529  // Bucket APIs
1530  void init_bucket(size_type bucket)
1531  {
1532  // Bucket 0 has no parent.
1533  __TBB_ASSERT( bucket != 0, "The first bucket must always be initialized");
1534 
1535  size_type parent_bucket = get_parent(bucket);
1536 
1537  // All parent_bucket buckets have to be initialized before this bucket is
1538  if (!is_initialized(parent_bucket))
1539  init_bucket(parent_bucket);
1540 
1541  raw_iterator parent = get_bucket(parent_bucket);
1542 
1543  // Create a dummy first node in this bucket
1545  set_bucket(bucket, dummy_node);
1546  }
1547 
1548  void adjust_table_size(size_type total_elements, size_type current_size)
1549  {
1550  // Grow the table by a factor of 2 if possible and needed
1551  if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
1552  {
1553  // Double the size of the hash only if size has not changed in between loads
1554  my_number_of_buckets.compare_and_swap(2u*current_size, current_size);
1555  //Simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8
1556  //due to overzealous compiler warnings in /Wp64 mode
1557  }
1558  }
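 // Worked example (illustrative): with the default maximum load factor of 4
 // (initial_bucket_load) and current_size == 8, the 33rd insertion gives
 // 33/8 = 4.125 > 4, so the CAS doubles the bucket count to 16; a racing thread
 // that observed the same current_size simply loses the CAS and changes nothing.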
1559 
1561  {
1562  // Unsets bucket's most significant turned-on bit
1563  size_type msb = __TBB_Log2((uintptr_t)bucket);
1564  return bucket & ~(size_type(1) << msb);
1565  }
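 // Worked example (illustrative): get_parent(6) clears the top set bit of 0b110,
 // giving 0b010 == 2; get_parent(12) gives 4. The parent is the bucket whose range of
 // order keys was split when the table grew, which is why init_bucket() can recurse
 // through parents until it reaches the always-initialized bucket 0.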
1566 
1567 
1568  // Dynamic sized array (segments)
1571  return size_type( __TBB_Log2( uintptr_t(index|1) ) );
1572  }
1573 
1576  return (size_type(1)<<k & ~size_type(1));
1577  }
1578 
1581  return k? size_type(1)<<k : 2;
1582  }
1583 
1585  size_type segment = segment_index_of(bucket);
1586  bucket -= segment_base(segment);
1587  __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
1588  return my_buckets[segment][bucket];
1589  }
1590 
1592  size_type bucket = hash_key % my_number_of_buckets;
1593  size_type segment = segment_index_of(bucket);
1594  size_type index = bucket - segment_base(segment);
1595  if (my_buckets[segment] == NULL || my_buckets[segment][index].get_node_ptr() == NULL)
1596  init_bucket(bucket);
1597  return my_buckets[segment][index];
1598  }
1599 
1600  void set_bucket(size_type bucket, raw_iterator dummy_head) {
1601  size_type segment = segment_index_of(bucket);
1602  bucket -= segment_base(segment);
1603 
1604  if (my_buckets[segment] == NULL) {
1605  size_type sz = segment_size(segment);
1606  raw_iterator * new_segment = my_allocator.allocate(sz);
1607  std::memset(static_cast<void*>(new_segment), 0, sz*sizeof(raw_iterator));
1608 
1609  if (my_buckets[segment].compare_and_swap( new_segment, NULL) != NULL)
1610  my_allocator.deallocate(new_segment, sz);
1611  }
1612 
1613  my_buckets[segment][bucket] = dummy_head;
1614  }
1615 
1616  bool is_initialized(size_type bucket) const {
1617  size_type segment = segment_index_of(bucket);
1618  bucket -= segment_base(segment);
1619 
1620  if (my_buckets[segment] == NULL)
1621  return false;
1622 
1623  raw_iterator it = my_buckets[segment][bucket];
1624  return (it.get_node_ptr() != NULL);
1625  }
1626 
1627  // Utilities for keys
1628 
1629  // A regular order key has its original hash value reversed and the last bit set
1631  return __TBB_ReverseBits(order_key) | 0x1;
1632  }
1633 
1634  // A dummy order key has its original hash value reversed and the last bit unset
1636  return __TBB_ReverseBits(order_key) & ~sokey_t(0x1);
1637  }
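 // Worked example (illustrative, shown with 8-bit values for brevity):
 //   hash value            0b00000110   (6)
 //   __TBB_ReverseBits     0b01100000
 //   regular order key     0b01100001   (reversed hash | 0x1)
 //   dummy order key       0b01100000   (reversed hash & ~0x1)
 // Regular elements of a bucket therefore sort after that bucket's dummy node and before
 // the next dummy node, and is_dummy() can tell the two kinds apart by the low bit alone.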
1638 
1639  // Shared variables
1641  solist_t my_solist; // List where all the elements are kept
1643  float my_maximum_bucket_size; // Maximum size of the bucket
1645 };
1646 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1647 #pragma warning(pop) // warning 4127 is back
1648 #endif
1649 
1650 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
1651 
1652 template<typename Value, typename Allocator>
1654 public:
1655  typedef Allocator allocator_type;
1656 protected:
1658 public:
1659 
1662  my_allocator(std::move(nh.my_allocator)) {
1663  nh.my_node = NULL;
1664  }
1665 
1666  bool empty() const { return my_node == NULL; }
1667  explicit operator bool() const { return my_node != NULL; }
1668 
1670 
1672  internal_destroy();
1673  my_node = nh.my_node;
1675  propagate_on_container_move_assignment pocma_type;
1676  tbb::internal::allocator_move_assignment(my_allocator, nh.my_allocator, pocma_type());
1677  nh.deactivate();
1678  return *this;
1679  }
1680 
1682  std::swap(my_node, nh.my_node);
1684  propagate_on_container_swap pocs_type;
1686  }
1687 
1689  return my_allocator;
1690  }
1691 
1692 protected:
1694 
1696  if(my_node) {
1697  my_allocator.destroy(&(my_node->my_element));
1698  // TODO: Consider using node_allocator from the container
1700  node_allocator.deallocate(my_node, 1);
1701  }
1702  }
1703 
1704  void deactivate() { my_node = NULL; }
1705 
1708 };
1709 
1710 // node handle for concurrent_unordered maps
1711 template<typename Key, typename Value, typename Allocator>
1712 class node_handle : public node_handle_base<Value, Allocator> {
1714 public:
1715  typedef Key key_type;
1716  typedef typename Value::second_type mapped_type;
1718 
1720 
1721  key_type& key() const {
1722  __TBB_ASSERT(!this->empty(), "Cannot get key from the empty node_type object");
1723  return *const_cast<key_type*>(&(this->my_node->my_element.first));
1724  }
1725 
1726  mapped_type& mapped() const {
1727  __TBB_ASSERT(!this->empty(), "Cannot get mapped value from the empty node_type object");
1728  return this->my_node->my_element.second;
1729  }
1730 
1731 private:
1732  template<typename T, typename A>
1733  friend class split_ordered_list;
1734 
1735  template<typename Traits>
1737 
1739 };
1740 
1741 // node handle for concurrent_unordered sets
1742 template<typename Key, typename Allocator>
1743 class node_handle<Key, Key, Allocator> : public node_handle_base<Key, Allocator> {
1745 public:
1746  typedef Key value_type;
1748 
1750 
1751  value_type& value() const {
1752  __TBB_ASSERT(!this->empty(), "Cannot get value from the empty node_type object");
1753  return *const_cast<value_type*>(&(this->my_node->my_element));
1754  }
1755 
1756 private:
1757  template<typename T, typename A>
1758  friend class split_ordered_list;
1759 
1760  template<typename Traits>
1762 
1764 };
1765 
1766 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
1767 
1768 } // namespace internal
1770 } // namespace interface5
1771 } // namespace tbb
1772 #endif // __TBB__concurrent_unordered_impl_H
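
A minimal usage sketch (illustrative; these internal classes back the public containers declared in tbb/concurrent_unordered_map.h and tbb/concurrent_unordered_set.h): concurrent insert() and find() calls are safe from multiple threads, while the erase members are prefixed unsafe_ because erasure may not run concurrently with other operations.

    #include <tbb/concurrent_unordered_map.h>
    #include <tbb/parallel_for.h>
    #include <tbb/blocked_range.h>
    #include <utility>

    int main() {
        tbb::concurrent_unordered_map<int, int> squares;

        // insert() may be called concurrently from many threads.
        tbb::parallel_for(tbb::blocked_range<int>(0, 1000),
            [&squares](const tbb::blocked_range<int>& r) {
                for (int i = r.begin(); i != r.end(); ++i)
                    squares.insert(std::make_pair(i, i * i));
            });

        // Lookups may run concurrently with insertions.
        return squares.count(42) == 1 ? 0 : 1;
    }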