Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
concurrent_hash_map.h
1/*
2 Copyright (c) 2005-2020 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15*/
16
17#ifndef __TBB_concurrent_hash_map_H
18#define __TBB_concurrent_hash_map_H
19
20#define __TBB_concurrent_hash_map_H_include_area
22
23#include "tbb_stddef.h"
24#include <iterator>
25#include <utility> // Need std::pair
26#include <cstring> // Need std::memset
27#include __TBB_STD_SWAP_HEADER
28
29#include "tbb_allocator.h"
30#include "spin_rw_mutex.h"
31#include "atomic.h"
32#include "tbb_exception.h"
33#include "tbb_profiling.h"
34#include "aligned_space.h"
38#if __TBB_INITIALIZER_LISTS_PRESENT
39#include <initializer_list>
40#endif
41#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
42#include <typeinfo>
43#endif
44#if __TBB_STATISTICS
45#include <stdio.h>
46#endif
47#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
48// Definition of __TBB_CPP11_RVALUE_REF_PRESENT includes __TBB_CPP11_TUPLE_PRESENT
49// for most platforms; the tuple-present macro was added for logical correctness
50#include <tuple>
51#endif
52
53namespace tbb {
54
55namespace interface5 {
56
57 template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<const Key, T> > >
59
61 namespace internal {
62 using namespace tbb::internal;
63
64
66 typedef size_t hashcode_t;
76 };
78 static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
80 static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
83 public:
85 typedef size_t size_type;
87 typedef size_t hashcode_t;
89 typedef size_t segment_index_t;
100 };
102 static size_type const embedded_block = 1;
106 static size_type const first_block = 8; //including embedded_block. perfect with bucket size 16, so the allocations are power of 4096
108 static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit
114 atomic<hashcode_t> my_mask;
118 atomic<size_type> my_size; // It must be in separate cache line from my_mask due to performance effects
121#if __TBB_STATISTICS
122 atomic<unsigned> my_info_resizes; // concurrent ones
123 mutable atomic<unsigned> my_info_restarts; // race collisions
124 atomic<unsigned> my_info_rehashes; // invocations of rehash_bucket
125#endif
128 std::memset(my_table, 0, sizeof(my_table));
129 my_mask = 0;
130 my_size = 0;
131 std::memset(my_embedded_segment, 0, sizeof(my_embedded_segment));
132 for( size_type i = 0; i < embedded_block; i++ ) // fill the table
135 __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
136#if __TBB_STATISTICS
137 my_info_resizes = 0; // concurrent ones
138 my_info_restarts = 0; // race collisions
139 my_info_rehashes = 0; // invocations of rehash_bucket
140#endif
141 }
142
145 return segment_index_t( __TBB_Log2( index|1 ) );
146 }
147
150 return (segment_index_t(1)<<k & ~segment_index_t(1));
151 }
152
155 return size_type(1)<<k; // fake value for k==0
156 }
157
159 static bool is_valid( void *ptr ) {
160 return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
161 }
162
164 static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
165 if( is_initial ) std::memset( static_cast<void*>(ptr), 0, sz*sizeof(bucket) );
166 else for(size_type i = 0; i < sz; i++, ptr++) {
167 *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
168 ptr->node_list = rehash_req;
169 }
170 }
171
173 static void add_to_bucket( bucket *b, node_base *n ) {
174 __TBB_ASSERT(b->node_list != rehash_req, NULL);
175 n->next = b->node_list;
176 b->node_list = n; // it's under lock and the flag is set
177 }
178
184 if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
185 }
186 };
187
189 template<typename Allocator>
190 void enable_segment( segment_index_t k, const Allocator& allocator, bool is_initial = false ) {
191 typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
192 typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
193 bucket_allocator_type bucket_allocator(allocator);
194 __TBB_ASSERT( k, "Zero segment must be embedded" );
195 enable_segment_failsafe watchdog( my_table, k );
196 size_type sz;
197 __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
198 if( k >= first_block ) {
199 sz = segment_size( k );
200 segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz);
201 init_buckets( ptr, sz, is_initial );
202 itt_hide_store_word( my_table[k], ptr );
203 sz <<= 1;// double it to get entire capacity of the container
204 } else { // the first block
205 __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
207 segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets);
208 init_buckets( ptr, sz - embedded_buckets, is_initial );
210 for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
212 }
214 watchdog.my_segment_ptr = 0;
215 }
216
217 template<typename Allocator>
218 void delete_segment(segment_index_t s, const Allocator& allocator) {
219 typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
220 typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
221 bucket_allocator_type bucket_allocator(allocator);
222 segment_ptr_t buckets_ptr = my_table[s];
223 size_type sz = segment_size( s ? s : 1 );
224
225 if( s >= first_block) // the first segment or the next
226 bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz);
227 else if( s == embedded_block && embedded_block != first_block )
228 bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr,
230 if( s >= embedded_block ) my_table[s] = 0;
231 }
232
234 bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere?
236 h -= segment_base(s);
237 segment_ptr_t seg = my_table[s];
238 __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
239 return &seg[h];
240 }
241
242 // internal serial rehashing helper
245 while( segment_ptr_t seg = my_table[++s] )
246 if( seg[h].node_list == rehash_req ) {
247 seg[h].node_list = empty_rehashed;
248 mark_rehashed_levels( h + ((hashcode_t)1<<s) ); // optimized segment_base(s)
249 }
250 }
251
253 // Splitting into two functions should help inlining
254 inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
255 hashcode_t m_now, m_old = m;
257 if( m_old != m_now )
258 return check_rehashing_collision( h, m_old, m = m_now );
259 return false;
260 }
261
264 __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m
265 if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
266 // condition above proves that 'h' has some other bits set beside 'm_old'
267 // find next applicable mask after m_old //TODO: look at bsl instruction
268 for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size
269 ;
270 m_old = (m_old<<1) - 1; // get full mask from a bit
271 __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
272 // check whether it is rehashing/ed
273 if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )
274 {
275#if __TBB_STATISTICS
276 my_info_restarts++; // race collisions
277#endif
278 return true;
279 }
280 }
281 return false;
282 }
283
286 size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
287 add_to_bucket( b, n );
288 // check load factor
289 if( sz >= mask ) { // TODO: add custom load_factor
290 segment_index_t new_seg = __TBB_Log2( mask+1 ); //optimized segment_index_of
291 __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
292 static const segment_ptr_t is_allocating = (segment_ptr_t)2;
293 if( !itt_hide_load_word(my_table[new_seg])
294 && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL )
295 return new_seg; // The value must be processed
296 }
297 return 0;
298 }
299
301 template<typename Allocator>
302 void reserve(size_type buckets, const Allocator& allocator) {
303 if( !buckets-- ) return;
304 bool is_initial = !my_size;
305 for( size_type m = my_mask; buckets > m; m = my_mask )
306 enable_segment( segment_index_of( m+1 ), allocator, is_initial );
307 }
310 using std::swap;
311 swap(this->my_mask, table.my_mask);
312 swap(this->my_size, table.my_size);
313 for(size_type i = 0; i < embedded_buckets; i++)
314 swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
316 swap(this->my_table[i], table.my_table[i]);
317 }
318
319#if __TBB_CPP11_RVALUE_REF_PRESENT
321 my_mask = other.my_mask;
322 other.my_mask = embedded_buckets - 1;
323 my_size = other.my_size;
324 other.my_size = 0;
325
326 for(size_type i = 0; i < embedded_buckets; ++i) {
327 my_embedded_segment[i].node_list = other.my_embedded_segment[i].node_list;
328 other.my_embedded_segment[i].node_list = NULL;
329 }
330
331 for(size_type i = embedded_block; i < pointers_per_table; ++i) {
332 my_table[i] = other.my_table[i];
333 other.my_table[i] = NULL;
334 }
335 }
336#endif // __TBB_CPP11_RVALUE_REF_PRESENT
337 };
338
339 template<typename Iterator>
340 class hash_map_range;
341
343
345 template<typename Container, typename Value>
347 : public std::iterator<std::forward_iterator_tag,Value>
348 {
349 typedef Container map_type;
350 typedef typename Container::node node;
353
354 template<typename C, typename T, typename U>
356
357 template<typename C, typename T, typename U>
359
360 template<typename C, typename T, typename U>
361 friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
362
363 template<typename C, typename U>
364 friend class hash_map_iterator;
365
366 template<typename I>
367 friend class hash_map_range;
368
369 void advance_to_next_bucket() { // TODO?: refactor to iterator_base class
370 size_t k = my_index+1;
371 __TBB_ASSERT( my_bucket, "advancing an invalid iterator?");
372 while( k <= my_map->my_mask ) {
373 // Following test uses 2's-complement wizardry
374 if( k&(k-2) ) // not the beginning of a segment
375 ++my_bucket;
376 else my_bucket = my_map->get_bucket( k );
377 my_node = static_cast<node*>( my_bucket->node_list );
379 my_index = k; return;
380 }
381 ++k;
382 }
383 my_bucket = 0; my_node = 0; my_index = k; // the end
384 }
385#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
386 template<typename Key, typename T, typename HashCompare, typename A>
388#else
389 public: // workaround
390#endif
392 const Container *my_map;
393
395 size_t my_index;
396
399
402
403 hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );
404
405 public:
409 my_map(other.my_map),
410 my_index(other.my_index),
411 my_bucket(other.my_bucket),
412 my_node(other.my_node)
413 {}
414
416 my_map = other.my_map;
417 my_index = other.my_index;
418 my_bucket = other.my_bucket;
419 my_node = other.my_node;
420 return *this;
421 }
422 Value& operator*() const {
423 __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
424 return my_node->value();
425 }
426 Value* operator->() const {return &operator*();}
428
431 hash_map_iterator old(*this);
432 operator++();
433 return old;
434 }
435 };
436
437 template<typename Container, typename Value>
438 hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
439 my_map(&map),
440 my_index(index),
441 my_bucket(b),
442 my_node( static_cast<node*>(n) )
443 {
444 if( b && !hash_map_base::is_valid(n) )
446 }
447
448 template<typename Container, typename Value>
450 my_node = static_cast<node*>( my_node->next );
451 if( !my_node ) advance_to_next_bucket();
452 return *this;
453 }
454
455 template<typename Container, typename T, typename U>
457 return i.my_node == j.my_node && i.my_map == j.my_map;
458 }
459
460 template<typename Container, typename T, typename U>
462 return i.my_node != j.my_node || i.my_map != j.my_map;
463 }
464
466
467 template<typename Iterator>
469 typedef typename Iterator::map_type map_type;
470 Iterator my_begin;
471 Iterator my_end;
472 mutable Iterator my_midpoint;
475 void set_midpoint() const;
476 template<typename U> friend class hash_map_range;
477 public:
479 typedef std::size_t size_type;
480 typedef typename Iterator::value_type value_type;
481 typedef typename Iterator::reference reference;
482 typedef typename Iterator::difference_type difference_type;
483 typedef Iterator iterator;
484
486 bool empty() const {return my_begin==my_end;}
487
489 bool is_divisible() const {
490 return my_midpoint!=my_end;
491 }
494 my_end(r.my_end),
496 {
498 __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
499 __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
500 set_midpoint();
501 r.set_midpoint();
502 }
504 template<typename U>
507 my_end(r.my_end),
510 {}
512 hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
513 my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
514 my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
515 my_grainsize( grainsize_ )
516 {
517 __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
518 set_midpoint();
519 }
520 const Iterator& begin() const {return my_begin;}
521 const Iterator& end() const {return my_end;}
524 };
525
526 template<typename Iterator>
528 // Split by groups of nodes
529 size_t m = my_end.my_index-my_begin.my_index;
530 if( m > my_grainsize ) {
531 m = my_begin.my_index + m/2u;
532 hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
533 my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
534 } else {
535 my_midpoint = my_end;
536 }
537 __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
538 "my_begin is after my_midpoint" );
539 __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
540 "my_midpoint is after my_end" );
541 __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
542 "[my_begin, my_midpoint) range should not be empty" );
543 }
544
545 } // internal
547
548#if _MSC_VER && !defined(__INTEL_COMPILER)
549 // Suppress "conditional expression is constant" warning.
550 #pragma warning( push )
551 #pragma warning( disable: 4127 )
552#endif
553
555
584template<typename Key, typename T, typename HashCompare, typename Allocator>
586 template<typename Container, typename Value>
588
589 template<typename I>
591
592public:
593 typedef Key key_type;
594 typedef T mapped_type;
595 typedef std::pair<const Key,T> value_type;
596 typedef hash_map_base::size_type size_type;
597 typedef ptrdiff_t difference_type;
599 typedef const value_type *const_pointer;
606 typedef Allocator allocator_type;
607
608protected:
609 friend class const_accessor;
610 class node;
614 HashCompare my_hash_compare;
615
616 class node : public node_base {
617 tbb::aligned_space<value_type> my_value;
618 public:
619 value_type* storage() { return my_value.begin(); }
620 value_type& value() { return *storage(); }
621 };
622
624 node_allocator_traits::destroy(my_allocator, static_cast<node*>(n)->storage());
627 }
628
632
635 if(my_node) {
638 }
639 }
640 void dismiss() { my_node = NULL; }
641 };
642
643#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
644 template<typename... Args>
645 static node* create_node(node_allocator_type& allocator, Args&&... args)
646#else
647 template<typename Arg1, typename Arg2>
648 static node* create_node(node_allocator_type& allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2)
649#endif
650 {
651 node* node_ptr = node_allocator_traits::allocate(allocator, 1);
652 node_scoped_guard guard(node_ptr, allocator);
653 node_allocator_traits::construct(allocator, node_ptr);
654#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
655 node_allocator_traits::construct(allocator, node_ptr->storage(), std::forward<Args>(args)...);
656#else
657 node_allocator_traits::construct(allocator, node_ptr->storage(), tbb::internal::forward<Arg1>(arg1), tbb::internal::forward<Arg2>(arg2));
658#endif
659 guard.dismiss();
660 return node_ptr;
661 }
662
663 static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){
664 return create_node(allocator, key, *t);
665 }
666
667#if __TBB_CPP11_RVALUE_REF_PRESENT
668 static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){
669 return create_node(allocator, key, std::move(*const_cast<T*>(t)));
670 }
671#endif
672
673 static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){
674#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
675 // Emplace construct an empty T object inside the pair
676 return create_node(allocator, std::piecewise_construct,
677 std::forward_as_tuple(key), std::forward_as_tuple());
678#else
679 // Use of a temporary object is impossible, because create_node takes a non-const reference.
680 // copy-initialization is possible because T is already required to be CopyConstructible.
681 T obj = T();
682 return create_node(allocator, key, tbb::internal::move(obj));
683#endif
684 }
685
686 static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){
687 __TBB_ASSERT(false,"this dummy function should not be called");
688 return NULL;
689 }
690
691 node *search_bucket( const key_type &key, bucket *b ) const {
692 node *n = static_cast<node*>( b->node_list );
693 while( is_valid(n) && !my_hash_compare.equal(key, n->value().first) )
694 n = static_cast<node*>( n->next );
695 __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
696 return n;
697 }
698
700 class bucket_accessor : public bucket::scoped_t {
702 public:
703 bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
705 inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
706 my_b = base->get_bucket( h );
707 // TODO: actually, notification is unnecessary here, just hiding double-check
709 && try_acquire( my_b->mutex, /*write=*/true ) )
710 {
711 if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing
712 }
713 else bucket::scoped_t::acquire( my_b->mutex, writer );
715 }
717 bool is_writer() { return bucket::scoped_t::is_writer; }
719 bucket *operator() () { return my_b; }
720 };
721
722 // TODO refactor to hash_base
723 void rehash_bucket( bucket *b_new, const hashcode_t h ) {
724 __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
725 __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
727 hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
728#if __TBB_STATISTICS
729 my_info_rehashes++; // invocations of rehash_bucket
730#endif
731
732 bucket_accessor b_old( this, h & mask );
733
734 mask = (mask<<1) | 1; // get full mask for new bucket
735 __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
736 restart:
737 for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
738 hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->value().first );
739#if TBB_USE_ASSERT
740 hashcode_t bmask = h & (mask>>1);
741 bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
742 __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
743#endif
744 if( (c & mask) == h ) {
745 if( !b_old.is_writer() )
746 if( !b_old.upgrade_to_writer() ) {
747 goto restart; // node ptr can be invalid due to concurrent erase
748 }
749 *p = n->next; // exclude from b_old
750 add_to_bucket( b_new, n );
751 } else p = &n->next; // iterate to next item
752 }
753 }
754
758 void dismiss() {my_ch_map = 0;}
760 if (my_ch_map){
761 my_ch_map->clear();
762 }
763 }
764 };
765public:
766
767 class accessor;
769 class const_accessor : private node::scoped_t /*which derived from no_copy*/ {
770 friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
771 friend class accessor;
772 public:
775
777 bool empty() const { return !my_node; }
778
780 void release() {
781 if( my_node ) {
783 my_node = 0;
784 }
785 }
786
789 __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
790 return my_node->value();
791 }
792
795 return &operator*();
796 }
797
800
803 my_node = NULL; // scoped lock's release() is called in its destructor
804 }
805 protected:
809 };
810
812 class accessor: public const_accessor {
813 public:
816
819 __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
820 return this->my_node->value();
821 }
822
824 pointer operator->() const {
825 return &operator*();
826 }
827 };
828
832 {}
833
834 explicit concurrent_hash_map( const HashCompare& compare, const allocator_type& a = allocator_type() )
836 {}
837
841 {
842 reserve( n, my_allocator );
843 }
844
845 concurrent_hash_map( size_type n, const HashCompare& compare, const allocator_type& a = allocator_type() )
847 {
848 reserve( n, my_allocator );
849 }
850
854 my_allocator(node_allocator_traits::select_on_container_copy_construction(table.get_allocator()))
855 {
856 call_clear_on_leave scope_guard(this);
857 internal_copy(table);
858 scope_guard.dismiss();
859 }
860
863 {
864 call_clear_on_leave scope_guard(this);
865 internal_copy(table);
866 scope_guard.dismiss();
867 }
868
869#if __TBB_CPP11_RVALUE_REF_PRESENT
873 {
874 internal_move(std::move(table));
875 }
876
880 {
881 if (a == table.get_allocator()){
882 internal_move(std::move(table));
883 }else{
884 call_clear_on_leave scope_guard(this);
885 internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size());
886 scope_guard.dismiss();
887 }
888 }
889#endif //__TBB_CPP11_RVALUE_REF_PRESENT
890
892 template<typename I>
895 {
896 call_clear_on_leave scope_guard(this);
897 internal_copy(first, last, std::distance(first, last));
898 scope_guard.dismiss();
899 }
900
901 template<typename I>
902 concurrent_hash_map( I first, I last, const HashCompare& compare, const allocator_type& a = allocator_type() )
904 {
905 call_clear_on_leave scope_guard(this);
906 internal_copy(first, last, std::distance(first, last));
907 scope_guard.dismiss();
908 }
909
910#if __TBB_INITIALIZER_LISTS_PRESENT
912 concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type &a = allocator_type() )
914 {
915 call_clear_on_leave scope_guard(this);
916 internal_copy(il.begin(), il.end(), il.size());
917 scope_guard.dismiss();
918 }
919
920 concurrent_hash_map( std::initializer_list<value_type> il, const HashCompare& compare, const allocator_type& a = allocator_type() )
922 {
923 call_clear_on_leave scope_guard(this);
924 internal_copy(il.begin(), il.end(), il.size());
925 scope_guard.dismiss();
926 }
927
928#endif //__TBB_INITIALIZER_LISTS_PRESENT
929
932 if( this!=&table ) {
934 clear();
936 internal_copy(table);
937 }
938 return *this;
939 }
940
941#if __TBB_CPP11_RVALUE_REF_PRESENT
944 if(this != &table) {
946 internal_move_assign(std::move(table), pocma_type());
947 }
948 return *this;
949 }
950#endif //__TBB_CPP11_RVALUE_REF_PRESENT
951
952#if __TBB_INITIALIZER_LISTS_PRESENT
954 concurrent_hash_map& operator=( std::initializer_list<value_type> il ) {
955 clear();
956 internal_copy(il.begin(), il.end(), il.size());
957 return *this;
958 }
959#endif //__TBB_INITIALIZER_LISTS_PRESENT
960
961
963
965 void rehash(size_type n = 0);
966
968 void clear();
969
972
973 //------------------------------------------------------------------------
974 // Parallel algorithm support
975 //------------------------------------------------------------------------
976 range_type range( size_type grainsize=1 ) {
977 return range_type( *this, grainsize );
978 }
979 const_range_type range( size_type grainsize=1 ) const {
980 return const_range_type( *this, grainsize );
981 }
982
983 //------------------------------------------------------------------------
984 // STL support - not thread-safe methods
985 //------------------------------------------------------------------------
987 iterator end() { return iterator( *this, 0, 0, 0 ); }
989 const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); }
990 std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }
991 std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }
992
994 size_type size() const { return my_size; }
995
997 bool empty() const { return my_size == 0; }
998
1000 size_type max_size() const {return (~size_type(0))/sizeof(node);}
1001
1003 size_type bucket_count() const { return my_mask+1; }
1004
1007
1009 void swap( concurrent_hash_map &table );
1010
1011 //------------------------------------------------------------------------
1012 // concurrent map operations
1013 //------------------------------------------------------------------------
1014
1016 size_type count( const Key &key ) const {
1017 return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false, &do_not_allocate_node );
1018 }
1019
1021
1022 bool find( const_accessor &result, const Key &key ) const {
1023 result.release();
1024 return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false, &do_not_allocate_node );
1025 }
1026
1028
1029 bool find( accessor &result, const Key &key ) {
1030 result.release();
1031 return lookup(/*insert*/false, key, NULL, &result, /*write=*/true, &do_not_allocate_node );
1032 }
1033
1035
1036 bool insert( const_accessor &result, const Key &key ) {
1037 result.release();
1038 return lookup(/*insert*/true, key, NULL, &result, /*write=*/false, &allocate_node_default_construct );
1039 }
1040
1042
1043 bool insert( accessor &result, const Key &key ) {
1044 result.release();
1045 return lookup(/*insert*/true, key, NULL, &result, /*write=*/true, &allocate_node_default_construct );
1046 }
1047
1049
1050 bool insert( const_accessor &result, const value_type &value ) {
1051 result.release();
1052 return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct );
1053 }
1054
1056
1057 bool insert( accessor &result, const value_type &value ) {
1058 result.release();
1059 return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct );
1060 }
1061
1063
1064 bool insert( const value_type &value ) {
1065 return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false, &allocate_node_copy_construct );
1066 }
1067
1068#if __TBB_CPP11_RVALUE_REF_PRESENT
1070
1072 return generic_move_insert(result, std::move(value));
1073 }
1074
1076
1077 bool insert( accessor &result, value_type && value ) {
1078 return generic_move_insert(result, std::move(value));
1079 }
1080
1082
1084 return generic_move_insert(accessor_not_used(), std::move(value));
1085 }
1086
1087#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1089
1090 template<typename... Args>
1091 bool emplace( const_accessor &result, Args&&... args ) {
1092 return generic_emplace(result, std::forward<Args>(args)...);
1093 }
1094
1096
1097 template<typename... Args>
1098 bool emplace( accessor &result, Args&&... args ) {
1099 return generic_emplace(result, std::forward<Args>(args)...);
1100 }
1101
1103
1104 template<typename... Args>
1105 bool emplace( Args&&... args ) {
1106 return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
1107 }
1108#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1109#endif //__TBB_CPP11_RVALUE_REF_PRESENT
1110
1112 template<typename I>
1113 void insert( I first, I last ) {
1114 for ( ; first != last; ++first )
1115 insert( *first );
1116 }
1117
1118#if __TBB_INITIALIZER_LISTS_PRESENT
1120 void insert( std::initializer_list<value_type> il ) {
1121 insert( il.begin(), il.end() );
1122 }
1123#endif //__TBB_INITIALIZER_LISTS_PRESENT
1124
1126
1127 bool erase( const Key& key );
1128
1130
1131 bool erase( const_accessor& item_accessor ) {
1132 return exclude( item_accessor );
1133 }
1134
1136
1137 bool erase( accessor& item_accessor ) {
1138 return exclude( item_accessor );
1139 }
1140
1141protected:
1143 bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key &, const T * ), node *tmp_n = 0 ) ;
1144
1145 struct accessor_not_used { void release(){}};
1146 friend const_accessor* accessor_location( accessor_not_used const& ){ return NULL;}
1148
1149 friend bool is_write_access_needed( accessor const& ) { return true;}
1150 friend bool is_write_access_needed( const_accessor const& ) { return false;}
1151 friend bool is_write_access_needed( accessor_not_used const& ) { return false;}
1152
1153#if __TBB_CPP11_RVALUE_REF_PRESENT
1154 template<typename Accessor>
1155 bool generic_move_insert( Accessor && result, value_type && value ) {
1156 result.release();
1157 return lookup(/*insert*/true, value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct );
1158 }
1159
1160#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1161 template<typename Accessor, typename... Args>
1162 bool generic_emplace( Accessor && result, Args &&... args ) {
1163 result.release();
1164 node * node_ptr = create_node(my_allocator, std::forward<Args>(args)...);
1165 return lookup(/*insert*/true, node_ptr->value().first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr );
1166 }
1167#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1168#endif //__TBB_CPP11_RVALUE_REF_PRESENT
1169
1171 bool exclude( const_accessor &item_accessor );
1172
1174 template<typename I>
1175 std::pair<I, I> internal_equal_range( const Key& key, I end ) const;
1176
1178 void internal_copy( const concurrent_hash_map& source );
1179
1180 template<typename I>
1181 void internal_copy( I first, I last, size_type reserve_size );
1182
1183#if __TBB_CPP11_RVALUE_REF_PRESENT
1184 // A compile-time dispatch to allow move assignment of containers with non-movable value_type if POCMA is true_type
1187 internal_move(std::move(other));
1188 }
1189
1191 if (this->my_allocator == other.my_allocator) {
1192 internal_move(std::move(other));
1193 } else {
1194 //do per element move
1195 internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), other.size());
1196 }
1197 }
1198#endif
1199
1201
1204 hashcode_t h = my_hash_compare.hash( key );
1206 node *n;
1207 restart:
1208 __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1209 bucket *b = get_bucket( h & m );
1210 // TODO: actually, notification is unnecessary here, just hiding double-check
1212 {
1213 bucket::scoped_t lock;
1214 if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
1216 const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); //recursive rehashing
1217 }
1218 else lock.acquire( b->mutex, /*write=*/false );
1220 }
1221 n = search_bucket( key, b );
1222 if( n )
1223 return n->storage();
1224 else if( check_mask_race( h, m ) )
1225 goto restart;
1226 return 0;
1227 }
1228};
1229
1230#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1231namespace internal {
1232using namespace tbb::internal;
1233
1234template<template<typename...> typename Map, typename Key, typename T, typename... Args>
1235using hash_map_t = Map<
1236 Key, T,
1237 std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >,
1238 pack_element_t<0, Args...>, tbb_hash_compare<Key> >,
1239 std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t<sizeof...(Args)-1, Args...> >,
1240 pack_element_t<sizeof...(Args)-1, Args...>, tbb_allocator<std::pair<const Key, T> > >
1241>;
1242}
1243
1244// Deduction guide for the constructor from two iterators and hash_compare/ allocator
1245template<typename I, typename... Args>
1246concurrent_hash_map(I, I, Args...)
1247-> internal::hash_map_t<concurrent_hash_map, internal::iterator_key_t<I>,internal::iterator_mapped_t<I>, Args...>;
1248
1249// Deduction guide for the constructor from an initializer_list and hash_compare/ allocator
1250// Deduction guide for an initializer_list, hash_compare and allocator is implicit
1251template<typename Key, typename T, typename CompareOrAllocator>
1252concurrent_hash_map(std::initializer_list<std::pair<const Key, T>>, CompareOrAllocator)
1253-> internal::hash_map_t<concurrent_hash_map, Key, T, CompareOrAllocator>;
1254
1255#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
1256
1257template<typename Key, typename T, typename HashCompare, typename A>
1258bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*), node *tmp_n ) {
1259 __TBB_ASSERT( !result || !result->my_node, NULL );
1260 bool return_value;
1261 hashcode_t const h = my_hash_compare.hash( key );
1263 segment_index_t grow_segment = 0;
1264 node *n;
1265 restart:
1266 {//lock scope
1267 __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1268 return_value = false;
1269 // get bucket
1270 bucket_accessor b( this, h & m );
1271
1272 // find a node
1273 n = search_bucket( key, b() );
1274 if( op_insert ) {
1275 // [opt] insert a key
1276 if( !n ) {
1277 if( !tmp_n ) {
1278 tmp_n = allocate_node(my_allocator, key, t);
1279 }
1280 if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion
1281 // Rerun search_list, in case another thread inserted the item during the upgrade.
1282 n = search_bucket( key, b() );
1283 if( is_valid(n) ) { // unfortunately, it did
1284 b.downgrade_to_reader();
1285 goto exists;
1286 }
1287 }
1288 if( check_mask_race(h, m) )
1289 goto restart; // b.release() is done in ~b().
1290 // insert and set flag to grow the container
1291 grow_segment = insert_new_node( b(), n = tmp_n, m );
1292 tmp_n = 0;
1293 return_value = true;
1294 }
1295 } else { // find or count
1296 if( !n ) {
1297 if( check_mask_race( h, m ) )
1298 goto restart; // b.release() is done in ~b(). TODO: replace by continue
1299 return false;
1300 }
1301 return_value = true;
1302 }
1303 exists:
1304 if( !result ) goto check_growth;
1305 // TODO: the following seems as generic/regular operation
1306 // acquire the item
1307 if( !result->try_acquire( n->mutex, write ) ) {
1308 for( tbb::internal::atomic_backoff backoff(true);; ) {
1309 if( result->try_acquire( n->mutex, write ) ) break;
1310 if( !backoff.bounded_pause() ) {
1311 // the wait takes really long, restart the operation
1312 b.release();
1313 __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
1314 __TBB_Yield();
1315 m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1316 goto restart;
1317 }
1318 }
1319 }
1320 }//lock scope
1321 result->my_node = n;
1322 result->my_hash = h;
1323check_growth:
1324 // [opt] grow the container
1325 if( grow_segment ) {
1326#if __TBB_STATISTICS
1327 my_info_resizes++; // concurrent ones
1328#endif
1329 enable_segment( grow_segment, my_allocator );
1330 }
1331 if( tmp_n ) // if op_insert only
1332 delete_node( tmp_n );
1333 return return_value;
1334}
1335
1336template<typename Key, typename T, typename HashCompare, typename A>
1337template<typename I>
1338std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
1339 hashcode_t h = my_hash_compare.hash( key );
1340 hashcode_t m = my_mask;
1341 __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1342 h &= m;
1343 bucket *b = get_bucket( h );
1344 while( b->node_list == internal::rehash_req ) {
1345 m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1346 b = get_bucket( h &= m );
1347 }
1348 node *n = search_bucket( key, b );
1349 if( !n )
1350 return std::make_pair(end_, end_);
1351 iterator lower(*this, h, b, n), upper(lower);
1352 return std::make_pair(lower, ++upper);
1353}
1354
1355template<typename Key, typename T, typename HashCompare, typename A>
1357 __TBB_ASSERT( item_accessor.my_node, NULL );
1358 node_base *const n = item_accessor.my_node;
1359 hashcode_t const h = item_accessor.my_hash;
1361 do {
1362 // get bucket
1363 bucket_accessor b( this, h & m, /*writer=*/true );
1364 node_base **p = &b()->node_list;
1365 while( *p && *p != n )
1366 p = &(*p)->next;
1367 if( !*p ) { // someone else was first
1368 if( check_mask_race( h, m ) )
1369 continue;
1370 item_accessor.release();
1371 return false;
1372 }
1373 __TBB_ASSERT( *p == n, NULL );
1374 *p = n->next; // remove from container
1375 my_size--;
1376 break;
1377 } while(true);
1378 if( !item_accessor.is_writer() ) // need to get exclusive lock
1379 item_accessor.upgrade_to_writer(); // return value means nothing here
1380 item_accessor.release();
1381 delete_node( n ); // Only one thread can delete it
1382 return true;
1383}
1384
1385template<typename Key, typename T, typename HashCompare, typename A>
1387 node_base *n;
1388 hashcode_t const h = my_hash_compare.hash( key );
1390restart:
1391 {//lock scope
1392 // get bucket
1393 bucket_accessor b( this, h & m );
1394 search:
1395 node_base **p = &b()->node_list;
1396 n = *p;
1397 while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->value().first ) ) {
1398 p = &n->next;
1399 n = *p;
1400 }
1401 if( !n ) { // not found, but mask could be changed
1402 if( check_mask_race( h, m ) )
1403 goto restart;
1404 return false;
1405 }
1406 else if( !b.is_writer() && !b.upgrade_to_writer() ) {
1407 if( check_mask_race( h, m ) ) // contended upgrade, check mask
1408 goto restart;
1409 goto search;
1410 }
1411 *p = n->next;
1412 my_size--;
1413 }
1414 {
1415 typename node::scoped_t item_locker( n->mutex, /*write=*/true );
1416 }
1417 // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor!
1418 delete_node( n ); // Only one thread can delete it due to write lock on the bucket
1419 return true;
1420}
1421
1422template<typename Key, typename T, typename HashCompare, typename A>
1424 typedef typename node_allocator_traits::propagate_on_container_swap pocs_type;
1425 if (this != &table && (pocs_type::value || my_allocator == table.my_allocator)) {
1426 using std::swap;
1427 tbb::internal::allocator_swap(this->my_allocator, table.my_allocator, pocs_type());
1428 swap(this->my_hash_compare, table.my_hash_compare);
1429 internal_swap(table);
1430 }
1431}
1432
1433template<typename Key, typename T, typename HashCompare, typename A>
1435 reserve( sz, my_allocator ); // TODO: add reduction of number of buckets as well
1436 hashcode_t mask = my_mask;
1437 hashcode_t b = (mask+1)>>1; // size or first index of the last segment
1438 __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
1439 bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
1440 for(; b <= mask; b++, bp++ ) {
1441 node_base *n = bp->node_list;
1442 __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1443 __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1444 if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
1445 hashcode_t h = b; bucket *b_old = bp;
1446 do {
1447 __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
1448 hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1449 b_old = get_bucket( h &= m );
1450 } while( b_old->node_list == internal::rehash_req );
1451 // now h is the index of the root rehashed bucket b_old
1452 mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
1453 for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
1454 hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->value().first );
1455 if( (c & mask) != h ) { // should be rehashed
1456 *p = q->next; // exclude from b_old
1457 bucket *b_new = get_bucket( c & mask );
1458 __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
1459 add_to_bucket( b_new, q );
1460 } else p = &q->next; // iterate to next item
1461 }
1462 }
1463 }
1464#if TBB_USE_PERFORMANCE_WARNINGS
1465 int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1466 static bool reported = false;
1467#endif
1468#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1469 for( b = 0; b <= mask; b++ ) {// only last segment should be scanned for rehashing
1470 if( b & (b-2) ) ++bp; // not the beginning of a segment
1471 else bp = get_bucket( b );
1472 node_base *n = bp->node_list;
1473 __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1474 __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
1475#if TBB_USE_PERFORMANCE_WARNINGS
1476 if( n == internal::empty_rehashed ) empty_buckets++;
1477 else if( n->next ) overpopulated_buckets++;
1478#endif
1479#if TBB_USE_ASSERT
1480 for( ; is_valid(n); n = n->next ) {
1481 hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first ) & mask;
1482 __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
1483 }
1484#endif
1485 }
1486#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1487#if TBB_USE_PERFORMANCE_WARNINGS
1488 if( buckets > current_size) empty_buckets -= buckets - current_size;
1489 else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1490 if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1492 "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
1494 typeid(*this).name(),
1495#else
1496 "concurrent_hash_map",
1497#endif
1498 current_size, empty_buckets, overpopulated_buckets );
1499 reported = true;
1500 }
1501#endif
1502}
1503
1504template<typename Key, typename T, typename HashCompare, typename A>
1506 hashcode_t m = my_mask;
1507 __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1508#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1509#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1510 int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1511 static bool reported = false;
1512#endif
1513 bucket *bp = 0;
1514 // check consistency
1515 for( segment_index_t b = 0; b <= m; b++ ) {
1516 if( b & (b-2) ) ++bp; // not the beginning of a segment
1517 else bp = get_bucket( b );
1518 node_base *n = bp->node_list;
1519 __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1520 __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
1521#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1522 if( n == internal::empty_rehashed ) empty_buckets++;
1523 else if( n == internal::rehash_req ) buckets--;
1524 else if( n->next ) overpopulated_buckets++;
1525#endif
1526#if __TBB_EXTRA_DEBUG
1527 for(; is_valid(n); n = n->next ) {
1528 hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first );
1529 h &= m;
1530 __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
1531 }
1532#endif
1533 }
1534#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1535#if __TBB_STATISTICS
1536 printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
1537 " concurrent: resizes=%u rehashes=%u restarts=%u\n",
1538 current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
1539 unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
1540 my_info_resizes = 0; // concurrent ones
1541 my_info_restarts = 0; // race collisions
1542 my_info_rehashes = 0; // invocations of rehash_bucket
1543#endif
1544 if( buckets > current_size) empty_buckets -= buckets - current_size;
1545 else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1546 if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1548 "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
1550 typeid(*this).name(),
1551#else
1552 "concurrent_hash_map",
1553#endif
1554 current_size, empty_buckets, overpopulated_buckets );
1555 reported = true;
1556 }
1557#endif
1558#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1559 my_size = 0;
1560 segment_index_t s = segment_index_of( m );
1561 __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
1562 do {
1563 __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
1564 segment_ptr_t buckets_ptr = my_table[s];
1565 size_type sz = segment_size( s ? s : 1 );
1566 for( segment_index_t i = 0; i < sz; i++ )
1567 for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
1568 buckets_ptr[i].node_list = n->next;
1569 delete_node( n );
1570 }
1571 delete_segment(s, my_allocator);
1572 } while(s-- > 0);
1573 my_mask = embedded_buckets - 1;
1574}
1575
1576template<typename Key, typename T, typename HashCompare, typename A>
1578 hashcode_t mask = source.my_mask;
1579 if( my_mask == mask ) { // optimized version
1580 reserve( source.my_size, my_allocator ); // TODO: load_factor?
1581 bucket *dst = 0, *src = 0;
1582 bool rehash_required = false;
1583 for( hashcode_t k = 0; k <= mask; k++ ) {
1584 if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
1585 else { dst = get_bucket( k ); src = source.get_bucket( k ); }
1586 __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
1587 node *n = static_cast<node*>( src->node_list );
1588 if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
1589 rehash_required = true;
1591 } else for(; n; n = static_cast<node*>( n->next ) ) {
1592 node* node_ptr = create_node(my_allocator, n->value().first, n->value().second);
1593 add_to_bucket( dst, node_ptr);
1594 ++my_size; // TODO: replace by non-atomic op
1595 }
1596 }
1597 if( rehash_required ) rehash();
1598 } else internal_copy( source.begin(), source.end(), source.my_size );
1599}
1600
1601template<typename Key, typename T, typename HashCompare, typename A>
1602template<typename I>
1604 reserve( reserve_size, my_allocator ); // TODO: load_factor?
1605 hashcode_t m = my_mask;
1606 for(; first != last; ++first) {
1607 hashcode_t h = my_hash_compare.hash( (*first).first );
1608 bucket *b = get_bucket( h & m );
1609 __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
1610 node* node_ptr = create_node(my_allocator, (*first).first, (*first).second);
1611 add_to_bucket( b, node_ptr );
1612 ++my_size; // TODO: replace by non-atomic op
1613 }
1614}
1615
1616} // namespace interface5
1617
1619
1620
1621template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1623 if(a.size() != b.size()) return false;
1626 for(; i != i_end; ++i) {
1627 j = b.equal_range(i->first).first;
1628 if( j == j_end || !(i->second == j->second) ) return false;
1629 }
1630 return true;
1631}
1632
1633template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1635{ return !(a == b); }
1636
1637template<typename Key, typename T, typename HashCompare, typename A>
1639{ a.swap( b ); }
1640
1641#if _MSC_VER && !defined(__INTEL_COMPILER)
1642 #pragma warning( pop )
1643#endif // warning 4127 is back
1644
1645} // namespace tbb
1646
1648#undef __TBB_concurrent_hash_map_H_include_area
1649
1650#endif /* __TBB_concurrent_hash_map_H */
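
The accessor and const_accessor classes declared above behave as scoped locks on individual elements: find()/insert() with an accessor take a write lock, the const_accessor overloads take a read lock, and the lock is released when the accessor is destroyed or release() is called. A minimal usage sketch (the word-count table and the literal keys are illustrative, not part of this header):

    #include <iostream>
    #include <string>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<std::string, int> table_t; // default tbb_hash_compare / tbb_allocator

    void count_word( table_t& table, const std::string& word ) {
        table_t::accessor a;            // write (exclusive) access while 'a' is alive
        table.insert( a, word );        // inserts {word, int()} if the key is absent, then locks it
        a->second += 1;                 // operator-> yields std::pair<const Key, T>*
    }                                   // element lock released here

    int main() {
        table_t table;
        count_word( table, "tbb" );
        count_word( table, "tbb" );

        table_t::const_accessor ca;     // read (shared) access
        if( table.find( ca, "tbb" ) )
            std::cout << ca->first << ": " << ca->second << std::endl;  // prints "tbb: 2"
        ca.release();                   // release the element before erasing the same key

        table.erase( "tbb" );           // erase by key
        return 0;
    }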
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition tbb_stddef.h:165
#define __TBB_FORWARDING_REF(A)
Definition tbb_stddef.h:517
#define __TBB_USE_OPTIONAL_RTTI
Definition tbb_config.h:125
#define __TBB_Yield()
Definition ibm_aix51.h:44
#define __TBB_Log2(V)
STL namespace.
The graph class.
bool operator==(const cache_aligned_allocator< T > &, const cache_aligned_allocator< U > &)
void move(tbb_thread &t1, tbb_thread &t2)
Definition tbb_thread.h:319
@ acquire
Acquire.
Definition atomic.h:57
bool operator!=(const cache_aligned_allocator< T > &, const cache_aligned_allocator< U > &)
Identifiers declared inside namespace internal should never be used directly by client code.
Definition atomic.h:65
void __TBB_EXPORTED_FUNC runtime_warning(const char *format,...)
Report a runtime warning.
T __TBB_load_with_acquire(const volatile T &location)
void allocator_swap(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)
void swap(atomic< T > &lhs, atomic< T > &rhs)
Definition atomic.h:564
void allocator_move_assignment(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)
void allocator_copy_assignment(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)
void itt_hide_store_word(T &dst, T src)
T itt_load_word_with_acquire(const tbb::atomic< T > &src)
atomic< T > & as_atomic(T &t)
Definition atomic.h:572
void itt_store_word_with_release(tbb::atomic< T > &dst, U src)
auto last(Container &c) -> decltype(begin(c))
auto first(Container &c) -> decltype(begin(c))
T itt_hide_load_word(const T &src)
void __TBB_store_with_release(volatile T &location, V value)
static hash_map_node_base *const empty_rehashed
Rehashed empty bucket flag.
static hash_map_node_base *const rehash_req
Incompleteness flag value.
size_t hashcode_t
Type of a hash code.
internal::hash_map_range< const_iterator > const_range_type
concurrent_hash_map & operator=(const concurrent_hash_map &table)
Assignment.
bool insert(value_type &&value)
Insert item by moving if there is no such key present already.
void internal_copy(const concurrent_hash_map &source)
Copy "source" to *this, where *this must start out empty.
concurrent_hash_map(I first, I last, const HashCompare &compare, const allocator_type &a=allocator_type())
static node * do_not_allocate_node(node_allocator_type &, const Key &, const T *)
void insert(I first, I last)
Insert range [first, last).
concurrent_hash_map(concurrent_hash_map &&table, const allocator_type &a)
Move constructor.
node * search_bucket(const key_type &key, bucket *b) const
bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node *(*allocate_node)(node_allocator_type &, const Key &, const T *), node *tmp_n=0)
Insert or find item and optionally acquire a lock on the item.
friend bool is_write_access_needed(const_accessor const &)
tbb::internal::allocator_rebind< Allocator, node >::type node_allocator_type
std::pair< const_iterator, const_iterator > equal_range(const Key &key) const
range_type range(size_type grainsize=1)
concurrent_hash_map(const allocator_type &a=allocator_type())
Construct empty table.
concurrent_hash_map(std::initializer_list< value_type > il, const HashCompare &compare, const allocator_type &a=allocator_type())
bool empty() const
True if size()==0.
void internal_move_assign(concurrent_hash_map &&other, tbb::internal::traits_true_type)
std::pair< I, I > internal_equal_range(const Key &key, I end) const
Return a pair of iterators bracketing the item with the given key, or (end, end) if no such item exists.
concurrent_hash_map(const HashCompare &compare, const allocator_type &a=allocator_type())
void swap(concurrent_hash_map &table)
Swap two instances. Iterators are invalidated.
internal::hash_map_iterator< concurrent_hash_map, const value_type > const_iterator
internal::hash_map_range< iterator > range_type
bool erase(const Key &key)
Erase item.
concurrent_hash_map & operator=(std::initializer_list< value_type > il)
Assignment.
bool erase(const_accessor &item_accessor)
Erase item by const_accessor.
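As a sketch of the two nearby erase forms (StringTable, forget and forget_if_zero are illustrative names, not part of the header): erase by key when no accessor is held, or erase through an accessor that already locks the item.

    #include "tbb/concurrent_hash_map.h"
    #include <string>

    typedef tbb::concurrent_hash_map<std::string, int> StringTable; // hypothetical instantiation

    // Erase by key: returns true only if the key was present.
    bool forget(StringTable& table, const std::string& word) {
        return table.erase(word);
    }

    // Erase through an accessor that already holds a write lock on the item.
    bool forget_if_zero(StringTable& table, const std::string& word) {
        StringTable::accessor a;
        if (table.find(a, word) && a->second == 0)
            return table.erase(a);   // accessor is released as part of the erasure
        return false;                // key absent or value nonzero
    }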
concurrent_hash_map(const concurrent_hash_map &table, const allocator_type &a)
bool find(const_accessor &result, const Key &key) const
Find item and acquire a read lock on the item.
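A usage sketch (StringTable and lookup_or are illustrative names): a const_accessor keeps a read lock on the found element for as long as it is alive, so concurrent readers of the same key do not block each other.

    #include "tbb/concurrent_hash_map.h"
    #include <string>

    typedef tbb::concurrent_hash_map<std::string, int> StringTable; // hypothetical instantiation

    int lookup_or(const StringTable& table, const std::string& word, int fallback) {
        StringTable::const_accessor ca;   // holds a read (shared) lock while alive
        if (table.find(ca, word))
            return ca->second;            // safe to read while the lock is held
        return fallback;                  // key absent; 'ca' stays empty
    }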
concurrent_hash_map(std::initializer_list< value_type > il, const allocator_type &a=allocator_type())
Construct table containing the items of initializer list il.
concurrent_hash_map & operator=(concurrent_hash_map &&table)
Move Assignment.
size_type max_size() const
Upper bound on size.
bool exclude(const_accessor &item_accessor)
Delete item by accessor.
size_type bucket_count() const
Returns the current number of buckets.
concurrent_hash_map(size_type n, const HashCompare &compare, const allocator_type &a=allocator_type())
friend bool is_write_access_needed(accessor_not_used const &)
static node * create_node(node_allocator_type &allocator, Args &&... args)
friend const_accessor * accessor_location(const_accessor &a)
void rehash_bucket(bucket *b_new, const hashcode_t h)
concurrent_hash_map(concurrent_hash_map &&table)
Move constructor.
bool insert(const value_type &value)
Insert item by copying if there is no such key present already.
bool insert(accessor &result, const Key &key)
Insert item (if not already present) and acquire a write lock on the item.
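A sketch of the accessor-based insert-or-update idiom (StringTable and increment are illustrative names): insert() adds the pair (key, T()) if the key is absent, otherwise finds the existing item, and the accessor holds a write lock until it is destroyed.

    #include "tbb/concurrent_hash_map.h"
    #include <string>

    typedef tbb::concurrent_hash_map<std::string, int> StringTable; // hypothetical instantiation

    void increment(StringTable& table, const std::string& word) {
        StringTable::accessor a;   // exclusive (write) lock while alive
        table.insert(a, word);     // adds (word, int()) if absent, else finds the existing item
        ++a->second;               // safe: no other thread can touch this item now
    }                              // lock released when 'a' goes out of scope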
std::pair< iterator, iterator > equal_range(const Key &key)
concurrent_hash_map(size_type n, const allocator_type &a=allocator_type())
Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
size_type size() const
Number of items in table.
bool generic_emplace(Accessor &&result, Args &&... args)
size_type count(const Key &key) const
Return count of items with matching key (0 or 1).
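For a pure membership test that leaves no lock held afterwards, count() is enough (contains is an illustrative helper, not part of the class):

    typedef tbb::concurrent_hash_map<std::string, int> StringTable; // hypothetical instantiation

    bool contains(const StringTable& table, const std::string& word) {
        return table.count(word) != 0;   // 0 or 1, since keys are unique in the map
    }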
bool find(accessor &result, const Key &key)
Find item and acquire a write lock on the item.
~concurrent_hash_map()
Clear table and destroy it.
concurrent_hash_map(const concurrent_hash_map &table)
Copy constructor.
const_range_type range(size_type grainsize=1) const
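A sketch of whole-table traversal through the range interface, assuming tbb/parallel_for.h and a C++11 lambda; sum_values is an illustrative name, and traversal like this should only run while no other thread is modifying the table.

    #include "tbb/concurrent_hash_map.h"
    #include "tbb/parallel_for.h"
    #include "tbb/atomic.h"
    #include <string>

    typedef tbb::concurrent_hash_map<std::string, int> StringTable; // hypothetical instantiation

    long sum_values(const StringTable& table) {   // call only when 'table' is not being mutated
        tbb::atomic<long> total;
        total = 0;
        tbb::parallel_for(table.range(),          // const_range_type, divisible by buckets
            [&total](const StringTable::const_range_type& r) {
                long local = 0;
                for (StringTable::const_iterator it = r.begin(); it != r.end(); ++it)
                    local += it->second;
                total += local;                   // one atomic add per subrange
            });
        return total;
    }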
bool insert(const_accessor &result, const value_type &value)
Insert item by copying if there is no such key present already and acquire a read lock on the item.
bool insert(accessor &result, value_type &&value)
Insert item by moving if there is no such key present already and acquire a write lock on the item.
static node * allocate_node_default_construct(node_allocator_type &allocator, const Key &key, const T *)
bool emplace(Args &&... args)
Construct item in place if there is no such key present already.
bool insert(const_accessor &result, value_type &&value)
Insert item by moving if there is no such key present already and acquire a read lock on the item.
bool emplace(accessor &result, Args &&... args)
Construct item in place if there is no such key present already and acquire a write lock on the item.
static node * allocate_node_move_construct(node_allocator_type &allocator, const Key &key, const T *t)
bool generic_move_insert(Accessor &&result, value_type &&value)
internal::hash_map_iterator< concurrent_hash_map, value_type > iterator
allocator_type get_allocator() const
Return allocator object.
void insert(std::initializer_list< value_type > il)
Insert initializer list.
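Assuming initializer-list support is enabled (__TBB_INITIALIZER_LISTS_PRESENT), a batch insert can be written directly; preload is an illustrative name.

    #include "tbb/concurrent_hash_map.h"
    #include <string>

    typedef tbb::concurrent_hash_map<std::string, int> StringTable; // hypothetical instantiation

    void preload(StringTable& table) {
        table.insert({ {"alpha", 1}, {"beta", 2}, {"gamma", 3} });  // keys already present are skipped
    }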
const_pointer internal_fast_find(const Key &key) const
Fast find when no concurrent erasure is used. For internal use inside TBB only!
void rehash(size_type n=0)
Rehashes and optionally resizes the whole table.
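When the expected element count is known ahead of time, a single rehash() before the parallel phase can avoid table growth inside the hot path. A minimal sketch (presize is an illustrative name and the bucket count is arbitrary):

    typedef tbb::concurrent_hash_map<std::string, int> StringTable; // hypothetical instantiation

    void presize(StringTable& table) {
        table.rehash(1 << 16);   // request at least 65536 buckets up front
    }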
friend const_accessor * accessor_location(accessor_not_used const &)
bool insert(const_accessor &result, const Key &key)
Insert item (if not already present) and acquire a read lock on the item.
static node * allocate_node_copy_construct(node_allocator_type &allocator, const Key &key, const T *t)
concurrent_hash_map(I first, I last, const allocator_type &a=allocator_type())
Construction with copying iteration range and given allocator instance.
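Construction from an existing sequence might look like the following sketch (std::vector and build are used only for illustration); returning by value relies on the move constructor listed above, which requires C++11.

    #include "tbb/concurrent_hash_map.h"
    #include <string>
    #include <utility>
    #include <vector>

    typedef tbb::concurrent_hash_map<std::string, int> StringTable; // hypothetical instantiation

    StringTable build(const std::vector<std::pair<std::string, int> >& src) {
        return StringTable(src.begin(), src.end());  // copies [first, last) into the new table
    }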
bool insert(accessor &result, const value_type &value)
Insert item by copying if there is no such key present already and acquire a write lock on the item.
bool erase(accessor &item_accessor)
Erase item by accessor.
bool emplace(const_accessor &result, Args &&... args)
Construct item in place if there is no such key present already and acquire a read lock on the item.
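A sketch of emplace with an accessor, assuming variadic-template support (IdTable and remember are illustrative names); the arguments after the accessor are forwarded to the value_type (std::pair) constructor, and the accessor ends up write-locked on the new or existing item.

    #include "tbb/concurrent_hash_map.h"
    #include <string>

    typedef tbb::concurrent_hash_map<int, std::string> IdTable; // hypothetical instantiation

    void remember(IdTable& table, int id, const char* name) {
        IdTable::accessor a;          // write lock on the (new or existing) item
        table.emplace(a, id, name);   // constructs std::pair<const int, std::string>(id, name) if absent
        a->second += "!";             // mutate under the write lock
    }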
friend bool is_write_access_needed(accessor const &)
void internal_move_assign(concurrent_hash_map &&other, tbb::internal::traits_false_type)
tbb::internal::allocator_traits< node_allocator_type > node_allocator_traits
hash_map_node_base * next
Next node in chain.
mutex_t::scoped_lock scoped_t
Scoped lock type for mutex.
Base class of concurrent_hash_map.
static segment_index_t segment_base(segment_index_t k)
hash_map_node_base node_base
Node base type.
bucket * get_bucket(hashcode_t h) const
Get bucket by (masked) hashcode.
void reserve(size_type buckets, const Allocator &allocator)
Prepare enough segments for number of buckets.
segments_table_t my_table
Segment pointers table. Also prevents false sharing between my_mask and my_size.
static size_type const first_block
Count of segments in the first block.
void enable_segment(segment_index_t k, const Allocator &allocator, bool is_initial=false)
Enable segment.
void delete_segment(segment_index_t s, const Allocator &allocator)
bool check_rehashing_collision(const hashcode_t h, hashcode_t m_old, hashcode_t m) const
Process mask race, check for rehashing collision.
static void init_buckets(segment_ptr_t ptr, size_type sz, bool is_initial)
Initialize buckets.
void internal_swap(hash_map_base &table)
Swap hash_map_bases.
bucket my_embedded_segment[embedded_buckets]
Zero segment.
static size_type const pointers_per_table
Number of segment pointers in the table: one segment per bit of the hash code.
static segment_index_t segment_index_of(size_type index)
static size_type const embedded_buckets
Count of buckets in the embedded segments.
static size_type segment_size(segment_index_t k)
bool check_mask_race(const hashcode_t h, hashcode_t &m) const
Check for mask race.
static void add_to_bucket(bucket *b, node_base *n)
Add node.
segment_ptr_t segments_table_t[pointers_per_table]
Segment pointers table type.
atomic< size_type > my_size
Size of container in stored items.
atomic< hashcode_t > my_mask
Hash mask = sum of allocated segment sizes - 1.
static size_type const embedded_block
Count of embedded segments stored directly in the table.
segment_index_t insert_new_node(bucket *b, node_base *n, hashcode_t mask)
Insert a node and check for load factor.
spin_rw_mutex mutex_t
Mutex type for buckets.
mutex_t::scoped_lock scoped_t
Scoped lock type for mutex.
enable_segment_failsafe(segments_table_t &table, segment_index_t k)
Range class used with concurrent_hash_map.
bool empty() const
True if range is empty.
bool is_divisible() const
True if range can be partitioned into two subranges.
hash_map_range(hash_map_range< U > &r)
Type conversion.
hash_map_range(const map_type &map, size_type grainsize_=1)
Init range with container and grainsize specified.
std::size_t size_type
Type for size of a range.
size_type grainsize() const
The grain size for this range.
hash_map_range(hash_map_range &r, split)
Split range.
void set_midpoint() const
Set my_midpoint to point approximately half way between my_begin and my_end.
Meets requirements of a forward iterator for STL.
friend ptrdiff_t operator-(const hash_map_iterator< C, T > &i, const hash_map_iterator< C, U > &j)
size_t my_index
Index in hash table for current item.
friend bool operator==(const hash_map_iterator< C, T > &i, const hash_map_iterator< C, U > &j)
hash_map_iterator operator++(int)
Post increment.
hash_map_iterator(const hash_map_iterator< Container, typename Container::value_type > &other)
node * my_node
Pointer to node that has current item.
hash_map_iterator & operator=(const hash_map_iterator< Container, typename Container::value_type > &other)
hash_map_iterator()
Construct undefined iterator.
const Container * my_map
concurrent_hash_map over which we are iterating.
friend bool operator!=(const hash_map_iterator< C, T > &i, const hash_map_iterator< C, U > &j)
tbb::aligned_space< value_type > my_value
Bucket accessor is used to find, rehash, acquire a lock on, and access a bucket.
void acquire(concurrent_hash_map *base, const hashcode_t h, bool writer=false)
Find a bucket by masked hashcode, optionally rehash, and acquire the lock.
bucket_accessor(concurrent_hash_map *base, const hashcode_t h, bool writer=false)
bool is_writer()
Check whether the bucket is locked for write.
Combines data access, locking, and garbage collection.
~const_accessor()
Destroy result after releasing the underlying reference.
const concurrent_hash_map::value_type value_type
Type of value.
const_pointer operator->() const
Return pointer to associated value in hash table.
const_reference operator*() const
Return reference to associated value in hash table.
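Because a const_accessor keeps its read lock until it is destroyed, it can pay off to copy the value out and release the accessor before any long computation. The sketch below uses const_accessor::release(); snapshot_value is an illustrative name.

    typedef tbb::concurrent_hash_map<std::string, int> StringTable; // hypothetical instantiation

    int snapshot_value(const StringTable& table, const std::string& word) {
        StringTable::const_accessor ca;
        int copy = 0;
        if (table.find(ca, word)) {
            copy = ca->second;   // read under the shared lock
            ca.release();        // drop the lock before doing anything slow
        }
        return copy;             // use the snapshot with no lock held
    }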
Allows write access to elements and combines data access, locking, and garbage collection.
pointer operator->() const
Return pointer to associated value in hash table.
reference operator*() const
Return reference to associated value in hash table.
concurrent_hash_map::value_type value_type
Type of value.
static void deallocate(Alloc &a, pointer p, size_type n)
static void construct(Alloc &, PT *p)
static pointer allocate(Alloc &a, size_type n)
static void destroy(Alloc &, T *p)
allocator_traits< Alloc >::template rebind_alloc< T >::other type
hash_compare that is default argument for concurrent_hash_map
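A replacement HashCompare only needs hash() and equal() callable on a const instance. The case-insensitive comparator below is an illustrative sketch, not part of the library.

    #include "tbb/concurrent_hash_map.h"
    #include <cctype>
    #include <string>

    struct CaseInsensitiveCompare {                    // hypothetical HashCompare
        static size_t hash(const std::string& s) {
            size_t h = 0;
            for (size_t i = 0; i < s.size(); ++i)
                h = h * 31 + std::tolower(static_cast<unsigned char>(s[i]));
            return h;
        }
        static bool equal(const std::string& a, const std::string& b) {
            if (a.size() != b.size()) return false;
            for (size_t i = 0; i < a.size(); ++i)
                if (std::tolower(static_cast<unsigned char>(a[i])) !=
                    std::tolower(static_cast<unsigned char>(b[i])))
                    return false;
            return true;
        }
    };

    typedef tbb::concurrent_hash_map<std::string, int, CaseInsensitiveCompare> FoldedTable;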
Fast, unfair, spinning reader-writer lock with backoff and writer-preference.
The scoped locking pattern.
bool is_writer
If mutex!=NULL, then is_writer is true if holding a writer lock, false if holding a reader lock.
Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.
Class that implements exponential backoff.
Base class for types that should not be copied or assigned.
Definition tbb_stddef.h:330
Dummy type that distinguishes splitting constructor from copy constructor.
Definition tbb_stddef.h:416

Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.