Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
concurrent_vector.h
1/*
2 Copyright (c) 2005-2020 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15*/
16
17#ifndef __TBB_concurrent_vector_H
18#define __TBB_concurrent_vector_H
19
20#define __TBB_concurrent_vector_H_include_area
22
23#include "tbb_stddef.h"
24#include "tbb_exception.h"
25#include "atomic.h"
27#include "blocked_range.h"
28#include "tbb_machine.h"
29#include "tbb_profiling.h"
30#include <new>
31#include <cstring> // for memset()
32#include __TBB_STD_SWAP_HEADER
33#include <algorithm>
34#include <iterator>
35
37
38#if _MSC_VER==1500 && !__INTEL_COMPILER
39 // VS2008/VC9 seems to have an issue; limits pull in math.h
40 #pragma warning( push )
41 #pragma warning( disable: 4985 )
42#endif
43#include <limits> /* std::numeric_limits */
44#if _MSC_VER==1500 && !__INTEL_COMPILER
45 #pragma warning( pop )
46#endif
47
48#if __TBB_INITIALIZER_LISTS_PRESENT
49 #include <initializer_list>
50#endif
51
52#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
53 // Workaround for overzealous compiler warnings in /Wp64 mode
54 #pragma warning (push)
55#if defined(_Wp64)
56 #pragma warning (disable: 4267)
57#endif
58 #pragma warning (disable: 4127) //warning C4127: conditional expression is constant
59#endif
60
61namespace tbb {
62
63template<typename T, class A = cache_aligned_allocator<T> >
64class concurrent_vector;
65
67namespace internal {
68
69 template<typename Container, typename Value>
70 class vector_iterator;
71
73 static void *const vector_allocation_error_flag = reinterpret_cast<void*>(size_t(63));
74
76 template<typename T>
77 void handle_unconstructed_elements(T* array, size_t n_of_elements){
78 std::memset( static_cast<void*>(array), 0, n_of_elements * sizeof( T ) );
79 }
80
 82
 83    class concurrent_vector_base_v3 {
 84    protected:
85
86 // Basic types declarations
87 typedef size_t segment_index_t;
88 typedef size_t size_type;
89
90 // Using enumerations due to Mac linking problems of static const variables
91 enum {
92 // Size constants
93 default_initial_segments = 1, // 2 initial items
95 pointers_per_short_table = 3, // to fit into 8 words of entire structure
96 pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit
97 };
98
 99        struct segment_not_used {};
100        struct segment_allocated {};
101        struct segment_allocation_failed {};
102
103        class segment_t;
104        class segment_value_t {
105            void* array;
106 private:
107 //TODO: More elegant way to grant access to selected functions _only_?
108 friend class segment_t;
109 explicit segment_value_t(void* an_array):array(an_array) {}
110 public:
111            friend bool operator==(segment_value_t const& lhs, segment_not_used ) { return lhs.array == 0;}
112            friend bool operator==(segment_value_t const& lhs, segment_allocated) { return lhs.array > vector_allocation_error_flag;}
113            friend bool operator==(segment_value_t const& lhs, segment_allocation_failed) { return lhs.array == vector_allocation_error_flag;}
114 template<typename argument_type>
115 friend bool operator!=(segment_value_t const& lhs, argument_type arg) { return ! (lhs == arg);}
116
117 template<typename T>
118 T* pointer() const { return static_cast<T*>(const_cast<void*>(array)); }
119 };
120
121        friend void enforce_segment_allocated(segment_value_t const& s, internal::exception_id exception = eid_bad_last_alloc){
122            if(s != segment_allocated()){
123 internal::throw_exception(exception);
124 }
125 }
126
127 // Segment pointer.
128 class segment_t {
129 atomic<void*> array;
130 public:
131 segment_t(){ store<relaxed>(segment_not_used());}
132            //Copy ctor and assignment operator are defined to ease the use of STL algorithms.
133            //These algorithms are usually not a synchronization point, so the semantics are
134            //intentionally relaxed here.
135 segment_t(segment_t const& rhs ){ array.store<relaxed>(rhs.array.load<relaxed>());}
136
137 void swap(segment_t & rhs ){
138 tbb::internal::swap<relaxed>(array, rhs.array);
139 }
140
141            segment_t& operator=( segment_t const& rhs ){
142                array.store<relaxed>(rhs.array.load<relaxed>());
143 return *this;
144 }
145
146 template<memory_semantics M>
147 segment_value_t load() const { return segment_value_t(array.load<M>());}
148
149 template<memory_semantics M>
150            void store(segment_not_used) __TBB_NOEXCEPT(true){
151                array.store<M>(0);
152 }
153
154 template<memory_semantics M>
155            void store(segment_allocation_failed) __TBB_NOEXCEPT(true){
156                __TBB_ASSERT(load<relaxed>() != segment_allocated(),"transition from \"allocated\" to \"allocation failed\" state looks non-logical");
157                array.store<M>(vector_allocation_error_flag);
158            }
159
160 template<memory_semantics M>
161 void store(void* allocated_segment_pointer) __TBB_NOEXCEPT(true) {
162 __TBB_ASSERT(segment_value_t(allocated_segment_pointer) == segment_allocated(),
163 "other overloads of store should be used for marking segment as not_used or allocation_failed" );
164 array.store<M>(allocated_segment_pointer);
165 }
166
167#if TBB_USE_ASSERT
168 ~segment_t() {
169 __TBB_ASSERT(load<relaxed>() != segment_allocated(), "should have been freed by clear" );
170 }
171#endif /* TBB_USE_ASSERT */
172 };
173 friend void swap(segment_t & , segment_t & ) __TBB_NOEXCEPT(true);
174
175 // Data fields
176
178 void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t);
179
181 atomic<size_type> my_first_block;
182
184 atomic<size_type> my_early_size;
185
187 atomic<segment_t*> my_segment;
188
189        //! embedded storage of segment pointers
190        segment_t my_storage[pointers_per_short_table];
191
192 // Methods
193
194        concurrent_vector_base_v3() {
195            //The semantics here are intentionally relaxed.
196            //The reason is the following:
197            //An object that is in the middle of construction (i.e. its constructor has not yet finished)
198            //cannot be used concurrently until the construction is finished.
199            //Thus, to signal other threads that construction is finished, synchronization with
200            //acquire-release semantics should be done by the (external) code that uses the vector.
201            //So there is no need to do the synchronization inside the vector itself.
202
203            my_early_size.store<relaxed>(0);
204            my_first_block.store<relaxed>(0); // deliberately not default_initial_segments
205            my_segment.store<relaxed>(my_storage);
206 }
207
209
210        //These helper methods rely on the fact that segments are allocated so
211        //that every segment size is an (increasing) power of 2,
212        //with one exception: segment 0 has size 2, the same as segment 1;
213        //e.g. the segment with index 3 has size 2^3 = 8 (a standalone sketch of this mapping follows the class definition below).
214        static segment_index_t segment_index_of( size_type index ) {
215            return segment_index_t( __TBB_Log2( index|1 ) );
216        }
217
218        static segment_index_t segment_base( segment_index_t k ) {
219            return (segment_index_t(1)<<k & ~segment_index_t(1));
220        }
221
222        static segment_index_t segment_base_index_of( segment_index_t &index ) {
223            segment_index_t k = segment_index_of( index );
224            index -= segment_base(k);
225            return k;
226        }
227
228        static size_type segment_size( segment_index_t k ) {
229            return segment_index_t(1)<<k; // fake value for k==0
230        }
231
232
233 static bool is_first_element_in_segment(size_type element_index){
234 //check if element_index is a power of 2 that is at least 2.
235 //The idea is to detect if the iterator crosses a segment boundary,
236 //and 2 is the minimal index for which it's true
237 __TBB_ASSERT(element_index, "there should be no need to call "
238 "is_first_element_in_segment for 0th element" );
239 return is_power_of_two_at_least( element_index, 2 );
240 }
241
244
243        typedef void (__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin, size_type n );
246        typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, const void* src, size_type n );
247
249        struct internal_segments_table {
250            segment_index_t first_block;
251            void* table[pointers_per_long_table];
252        };
253
254 void __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type element_size, size_type max_size );
256 void internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src );
257 size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src );
258 void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type& index );
260 void* __TBB_EXPORTED_METHOD internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy );
267
268    void __TBB_EXPORTED_METHOD internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,
269                                                internal_array_op1 destroy, internal_array_op2 init );
270    size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );
271
274private:
276 class helper;
277 friend class helper;
278
279 template<typename Container, typename Value>
280 friend class vector_iterator;
281
282 };
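    // A minimal standalone sketch of the index-to-segment mapping computed by
    // segment_index_of, segment_base and segment_size above; the local variables below are
    // illustrative stand-ins for those protected helpers, not part of the interface.
    inline void segment_layout_sketch() {
        size_t index    = 10;                          // global element index
        size_t k        = __TBB_Log2( index|1 );       // owning segment: floor(log2(10|1)) == 3
        size_t base     = (size_t(1)<<k) & ~size_t(1); // first global index of segment 3 == 8
        size_t offset   = index - base;                // position inside the segment == 2
        size_t capacity = size_t(1)<<k;                // segment 3 holds 2^3 == 8 elements
        (void)offset; (void)capacity;
    }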
283
284    inline void swap(concurrent_vector_base_v3::segment_t & lhs, concurrent_vector_base_v3::segment_t & rhs) __TBB_NOEXCEPT(true) {
285        lhs.swap(rhs);
286 }
287
288
289    typedef concurrent_vector_base_v3 concurrent_vector_base;
291
293 template<typename Container, typename Value>
294    class vector_iterator
295    {
297 Container* my_vector;
298
300 size_t my_index;
301
303
304 mutable Value* my_item;
305
306 template<typename C, typename T>
307 friend vector_iterator<C,T> operator+( ptrdiff_t offset, const vector_iterator<C,T>& v );
308
309 template<typename C, typename T, typename U>
310 friend bool operator==( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
311
312 template<typename C, typename T, typename U>
313 friend bool operator<( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
314
315 template<typename C, typename T, typename U>
316 friend ptrdiff_t operator-( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
317
318        template<typename C, typename U>
319        friend class internal::vector_iterator;
320
321#if !__TBB_TEMPLATE_FRIENDS_BROKEN
322        template<typename T, class A>
323        friend class tbb::concurrent_vector;
324#else
325public:
326#endif
327
328 vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) :
329 my_vector(const_cast<Container*>(&vector)),
330 my_index(index),
331 my_item(static_cast<Value*>(ptr))
332 {}
333
334 public:
336 vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {}
337
338        vector_iterator( const vector_iterator<Container,typename Container::value_type>& other ) :
339            my_vector(other.my_vector),
340 my_index(other.my_index),
341 my_item(other.my_item)
342 {}
343
344        vector_iterator &operator=( const vector_iterator<Container,typename Container::value_type>& other )
345        {
346 my_vector=other.my_vector;
347 my_index=other.my_index;
348 my_item=other.my_item;
349 return *this;
350 }
351
352 vector_iterator operator+( ptrdiff_t offset ) const {
353 return vector_iterator( *my_vector, my_index+offset );
354 }
355 vector_iterator &operator+=( ptrdiff_t offset ) {
356 my_index+=offset;
357 my_item = NULL;
358 return *this;
359 }
360 vector_iterator operator-( ptrdiff_t offset ) const {
361 return vector_iterator( *my_vector, my_index-offset );
362 }
363 vector_iterator &operator-=( ptrdiff_t offset ) {
364 my_index-=offset;
365 my_item = NULL;
366 return *this;
367 }
368 Value& operator*() const {
369 Value* item = my_item;
370 if( !item ) {
371 item = my_item = &my_vector->internal_subscript(my_index);
372 }
373 __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" );
374 return *item;
375 }
376 Value& operator[]( ptrdiff_t k ) const {
377 return my_vector->internal_subscript(my_index+k);
378 }
379 Value* operator->() const {return &operator*();}
380
381        //! Pre increment
382        vector_iterator& operator++() {
383            size_t element_index = ++my_index;
384            if( my_item ) {
385                //TODO: consider using of knowledge about "first_block optimization" here as well?
386                if( concurrent_vector_base::is_first_element_in_segment(element_index) ) {
387                    //if the iterator crosses a segment boundary, the cached pointer becomes invalid,
388                    //because the next segment may reside at a different memory location
389                    my_item= NULL;
390 } else {
391 ++my_item;
392 }
393 }
394 return *this;
395 }
396
397        //! Pre decrement
398        vector_iterator& operator--() {
399            __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" );
400            size_t element_index = my_index--;
401            if( my_item ) {
402                if( concurrent_vector_base::is_first_element_in_segment(element_index) ) {
403                    //if the iterator crosses a segment boundary, the cached pointer becomes invalid,
404                    //because the previous segment may reside at a different memory location
405                    my_item= NULL;
406 } else {
407 --my_item;
408 }
409 }
410 return *this;
411 }
412
413        //! Post increment
414        vector_iterator operator++(int) {
415            vector_iterator result = *this;
416 operator++();
417 return result;
418 }
419
420        //! Post decrement
421        vector_iterator operator--(int) {
422            vector_iterator result = *this;
423 operator--();
424 return result;
425 }
426
427 // STL support
428
429 typedef ptrdiff_t difference_type;
430 typedef Value value_type;
431 typedef Value* pointer;
432 typedef Value& reference;
433 typedef std::random_access_iterator_tag iterator_category;
434 };
435
436 template<typename Container, typename T>
437    vector_iterator<Container,T> operator+( ptrdiff_t offset, const vector_iterator<Container,T>& v ) {
438        return vector_iterator<Container,T>( *v.my_vector, v.my_index+offset );
439 }
440
441 template<typename Container, typename T, typename U>
442    bool operator==( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
443        return i.my_index==j.my_index && i.my_vector == j.my_vector;
444 }
445
446 template<typename Container, typename T, typename U>
447    bool operator!=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
448        return !(i==j);
449 }
450
451 template<typename Container, typename T, typename U>
452    bool operator<( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
453        return i.my_index<j.my_index;
454 }
455
456 template<typename Container, typename T, typename U>
457    bool operator>( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
458        return j<i;
459 }
460
461 template<typename Container, typename T, typename U>
462    bool operator>=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
463        return !(i<j);
464 }
465
466 template<typename Container, typename T, typename U>
467    bool operator<=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
468        return !(j<i);
469 }
470
471 template<typename Container, typename T, typename U>
472    ptrdiff_t operator-( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
473        return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index);
474 }
475
476 template<typename T, class A>
477    class allocator_base {
478    public:
479        typedef typename tbb::internal::allocator_rebind<A, T>::type allocator_type;
480        allocator_type my_allocator;
481        allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {}
482    };
483
484} // namespace internal
486
488
549template<typename T, class A>
550class concurrent_vector: protected internal::allocator_base<T, A>,
551                         private internal::concurrent_vector_base {
552private:
553 template<typename I>
554    class generic_range_type: public blocked_range<I> {
555    public:
556 typedef T value_type;
557 typedef T& reference;
558 typedef const T& const_reference;
559 typedef I iterator;
560 typedef ptrdiff_t difference_type;
561 generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
562 template<typename U>
563        generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
564        generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
565    };
566
567    template<typename C, typename U>
568    friend class internal::vector_iterator;
569
570public:
571 //------------------------------------------------------------------------
572 // STL compatible types
573 //------------------------------------------------------------------------
574    typedef internal::concurrent_vector_base_v3::size_type size_type;
575    typedef typename internal::allocator_base<T, A>::allocator_type allocator_type;
576
577 typedef T value_type;
578 typedef ptrdiff_t difference_type;
579 typedef T& reference;
580 typedef const T& const_reference;
581 typedef T *pointer;
582 typedef const T *const_pointer;
583
584    typedef internal::vector_iterator<concurrent_vector,T> iterator;
585    typedef internal::vector_iterator<concurrent_vector,const T> const_iterator;
586
587#if !defined(_MSC_VER) || _CPPLIB_VER>=300
588 // Assume ISO standard definition of std::reverse_iterator
589 typedef std::reverse_iterator<iterator> reverse_iterator;
590 typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
591#else
592 // Use non-standard std::reverse_iterator
593 typedef std::reverse_iterator<iterator,T,T&,T*> reverse_iterator;
594 typedef std::reverse_iterator<const_iterator,T,const T&,const T*> const_reverse_iterator;
595#endif /* !defined(_MSC_VER) || _CPPLIB_VER>=300 */
596
597 //------------------------------------------------------------------------
598 // Parallel algorithm support
599 //------------------------------------------------------------------------
600    typedef generic_range_type<iterator> range_type;
601    typedef generic_range_type<const_iterator> const_range_type;
602
603 //------------------------------------------------------------------------
604 // STL compatible constructors & destructors
605 //------------------------------------------------------------------------
606
607    //! Construct empty vector.
608    explicit concurrent_vector(const allocator_type &a = allocator_type())
609        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
610    {
611        vector_allocator_ptr = &internal_allocator;
612    }
613
614 //Constructors are not required to have synchronization
615 //(for more details see comment in the concurrent_vector_base constructor).
616#if __TBB_INITIALIZER_LISTS_PRESENT
618 concurrent_vector(std::initializer_list<T> init_list, const allocator_type &a = allocator_type())
620 {
622 __TBB_TRY {
623 internal_assign_iterators(init_list.begin(), init_list.end());
624 } __TBB_CATCH(...) {
625            segment_t *table = my_segment.load<relaxed>();
626            internal_free_segments(table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
627            __TBB_RETHROW();
628        }
629
630 }
631#endif //# __TBB_INITIALIZER_LISTS_PRESENT
632
636 {
638 __TBB_TRY {
639 internal_copy(vector, sizeof(T), &copy_array);
640 } __TBB_CATCH(...) {
641 segment_t *table = my_segment.load<relaxed>();
644 }
645 }
646
647#if __TBB_CPP11_RVALUE_REF_PRESENT
649 //TODO add __TBB_NOEXCEPT(true) and static_assert(std::has_nothrow_move_constructor<A>::value)
652 {
655 }
656
659 {
661        //The C++ standard requires that if two allocator instances compare equal,
662        //memory allocated by one instance can be deallocated through the other one.
663 if (a == source.my_allocator) {
665 } else {
666 __TBB_TRY {
667 internal_copy(source, sizeof(T), &move_array);
668 } __TBB_CATCH(...) {
669 segment_t *table = my_segment.load<relaxed>();
672 }
673 }
674 }
675
676#endif
677
679 template<class M>
682 {
684 __TBB_TRY {
685 internal_copy(vector.internal_vector_base(), sizeof(T), &copy_array);
686 } __TBB_CATCH(...) {
687 segment_t *table = my_segment.load<relaxed>();
690 }
691 }
692
695 {
697 __TBB_TRY {
698 internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
699 } __TBB_CATCH(...) {
700 segment_t *table = my_segment.load<relaxed>();
703 }
704 }
705
708 : internal::allocator_base<T, A>(a)
709 {
711 __TBB_TRY {
712 internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
713 } __TBB_CATCH(...) {
714 segment_t *table = my_segment.load<relaxed>();
717 }
718 }
719
721 template<class I>
723 : internal::allocator_base<T, A>(a)
724 {
726 __TBB_TRY {
728 } __TBB_CATCH(...) {
729 segment_t *table = my_segment.load<relaxed>();
732 }
733 }
734
736    concurrent_vector& operator=( const concurrent_vector& vector ) {
737        if( this != &vector )
738 internal_assign(vector, sizeof(T), &destroy_array, &assign_array, &copy_array);
739 return *this;
740 }
741
742#if __TBB_CPP11_RVALUE_REF_PRESENT
743 //TODO: add __TBB_NOEXCEPT()
745    concurrent_vector& operator=( concurrent_vector&& other ) {
746        __TBB_ASSERT(this != &other, "Move assignment to itself is prohibited");
747        typedef typename tbb::internal::allocator_traits<A>::propagate_on_container_move_assignment pocma_t;
748        if(pocma_t::value || this->my_allocator == other.my_allocator) {
749 concurrent_vector trash (std::move(*this));
750 internal_swap(other);
751 tbb::internal::allocator_move_assignment(this->my_allocator, other.my_allocator, pocma_t());
752        } else {
753            internal_assign(other, sizeof(T), &destroy_array, &move_assign_array, &move_array);
754        }
755 return *this;
756 }
757#endif
758    //TODO: add a template assignment operator? (i.e. with a different element type)
759
761 template<class M>
762    __TBB_DEPRECATED concurrent_vector& operator=( const concurrent_vector<T, M>& vector ) {
763        if( static_cast<void*>( this ) != static_cast<const void*>( &vector ) )
764            internal_assign(vector.internal_vector_base(),
765                sizeof(T), &destroy_array, &assign_array, &copy_array);
766 return *this;
767 }
768
769#if __TBB_INITIALIZER_LISTS_PRESENT
771 concurrent_vector& operator=( std::initializer_list<T> init_list ) {
772        internal_clear(&destroy_array);
773        internal_assign_iterators(init_list.begin(), init_list.end());
774 return *this;
775 }
776#endif //#if __TBB_INITIALIZER_LISTS_PRESENT
777
778 //------------------------------------------------------------------------
779 // Concurrent operations
780 //------------------------------------------------------------------------
782
783    iterator grow_by( size_type delta ) {
784        return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size.load());
785 }
786
788
789    iterator grow_by( size_type delta, const_reference t ) {
790        return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size.load());
791 }
792
794 template<typename I>
795    iterator grow_by( I first, I last ) {
796        typename std::iterator_traits<I>::difference_type delta = std::distance(first, last);
797 __TBB_ASSERT( delta >= 0, NULL);
798
799 return iterator(*this, delta ? internal_grow_by(delta, sizeof(T), &copy_range<I>, static_cast<const void*>(&first)) : my_early_size.load());
800 }
801
802#if __TBB_INITIALIZER_LISTS_PRESENT
804 iterator grow_by( std::initializer_list<T> init_list ) {
805 return grow_by( init_list.begin(), init_list.end() );
806 }
807#endif //#if __TBB_INITIALIZER_LISTS_PRESENT
808
810
814    iterator grow_to_at_least( size_type n ) {
815        size_type m=0;
816        if( n ) {
817            m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL );
818            if( m>n ) m=n;
819        }
820 return iterator(*this, m);
821 };
822
825    iterator grow_to_at_least( size_type n, const_reference t ) {
826        size_type m=0;
827        if( n ) {
828            m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) );
829            if( m>n ) m=n;
830 }
831 return iterator(*this, m);
832 };
833
835
836    iterator push_back( const_reference item )
837    {
838 push_back_helper prolog(*this);
839 new(prolog.internal_push_back_result()) T(item);
840 return prolog.return_iterator_and_dismiss();
841 }
842
843#if __TBB_CPP11_RVALUE_REF_PRESENT
845
846 iterator push_back( T&& item )
847 {
848 push_back_helper prolog(*this);
849 new(prolog.internal_push_back_result()) T(std::move(item));
850 return prolog.return_iterator_and_dismiss();
851 }
852#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
854
855 template<typename... Args>
856 iterator emplace_back( Args&&... args )
857 {
858 push_back_helper prolog(*this);
859 new(prolog.internal_push_back_result()) T(std::forward<Args>(args)...);
860 return prolog.return_iterator_and_dismiss();
861 }
862#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
863#endif //__TBB_CPP11_RVALUE_REF_PRESENT
865
866    //! Get reference to element at given index.
867    reference operator[]( size_type index ) {
868        return internal_subscript(index);
869    }
870
871    //! Get const reference to element at given index.
872    const_reference operator[]( size_type index ) const {
873        return internal_subscript(index);
874    }
875
876    //! Get reference to element at given index. Throws exceptions on errors.
877    reference at( size_type index ) {
878        return internal_subscript_with_exceptions(index);
879    }
880
881    //! Get const reference to element at given index. Throws exceptions on errors.
882    const_reference at( size_type index ) const {
883        return internal_subscript_with_exceptions(index);
884    }
885
887 range_type range( size_t grainsize = 1 ) {
888 return range_type( begin(), end(), grainsize );
889 }
890
892 const_range_type range( size_t grainsize = 1 ) const {
893 return const_range_type( begin(), end(), grainsize );
894 }
895
896 //------------------------------------------------------------------------
897 // Capacity
898 //------------------------------------------------------------------------
900    size_type size() const {
901        size_type sz = my_early_size, cp = internal_capacity();
902        return cp < sz ? cp : sz;
903 }
904
906 bool empty() const {return !my_early_size;}
907
908    //! Maximum size to which array can grow without allocating more memory. Concurrent allocations are not included in the value.
909    size_type capacity() const {return internal_capacity();}
910
912
914 void reserve( size_type n ) {
915 if( n )
916 internal_reserve(n, sizeof(T), max_size());
917 }
918
920 void resize( size_type n ) {
921 internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
922 }
923
924    //! Resize the vector, copy t for new elements. Not thread-safe.
925    void resize( size_type n, const_reference t ) {
926        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
927 }
928
929    //! Optimize memory usage and fragmentation. Must not be called concurrently with other operations on the same vector.
930    void shrink_to_fit();
931
933 size_type max_size() const {return (~size_type(0))/sizeof(T);}
934
935 //------------------------------------------------------------------------
936 // STL support
937 //------------------------------------------------------------------------
938
940 iterator begin() {return iterator(*this,0);}
942 iterator end() {return iterator(*this,size());}
944 const_iterator begin() const {return const_iterator(*this,0);}
946 const_iterator end() const {return const_iterator(*this,size());}
948 const_iterator cbegin() const {return const_iterator(*this,0);}
950 const_iterator cend() const {return const_iterator(*this,size());}
964    reference front() {
965        __TBB_ASSERT( size()>0, NULL);
966        const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
967        return (segment_value.template pointer<T>())[0];
968    }
969    //! the first item const
970    const_reference front() const {
971        __TBB_ASSERT( size()>0, NULL);
972        const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
973        return (segment_value.template pointer<const T>())[0];
974    }
975    //! the last item
976    reference back() {
977        __TBB_ASSERT( size()>0, NULL);
978        return internal_subscript( size()-1 );
979    }
980    //! the last item const
981    const_reference back() const {
982        __TBB_ASSERT( size()>0, NULL);
983        return internal_subscript( size()-1 );
984    }
986 allocator_type get_allocator() const { return this->my_allocator; }
987
989 void assign(size_type n, const_reference t) {
990 clear();
991 internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
992 }
993
995 template<class I>
996    void assign(I first, I last) {
997        clear(); internal_assign_range( first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
998    }
999
1000#if __TBB_INITIALIZER_LISTS_PRESENT
1002 void assign(std::initializer_list<T> init_list) {
1003 clear(); internal_assign_iterators( init_list.begin(), init_list.end());
1004 }
1005#endif //# __TBB_INITIALIZER_LISTS_PRESENT
1006
1008    void swap(concurrent_vector &vector) {
1009        typedef typename tbb::internal::allocator_traits<A>::propagate_on_container_swap pocs_t;
1010        if( this != &vector && (this->my_allocator == vector.my_allocator || pocs_t::value) ) {
1011            internal_swap(static_cast<concurrent_vector_base&>(vector));
1012            tbb::internal::allocator_swap(this->my_allocator, vector.my_allocator, pocs_t());
1013        }
1014 }
1015
1017
1018    void clear() {
1019        internal_clear(&destroy_array);
1020    }
1021
1022    //! Clear and destroy vector.
1023    ~concurrent_vector() {
1024        segment_t *table = my_segment.load<relaxed>();
1025        internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
1026        // the base class destructor runs after this
1027 }
1028
1030private:
1031    //! Allocate k items
1032    static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) {
1033        return static_cast<concurrent_vector<T, A>&>(vb).my_allocator.allocate(k);
1034    }
1036    void internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block);
1039    T& internal_subscript( size_type index ) const;
1042    T& internal_subscript_with_exceptions( size_type index ) const;
1045    void internal_assign_n(size_type n, const_pointer p) {
1046        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(p), &destroy_array, p? &initialize_array_by : &initialize_array );
1047 }
1048
1050 /* Functions declarations:
1051 * void foo(is_integer_tag<true>*);
1052 * void foo(is_integer_tag<false>*);
1053 * Usage example:
1054 * foo(static_cast<is_integer_tag<std::numeric_limits<T>::is_integer>*>(0));
1055 */
1056 template<bool B> class is_integer_tag;
1057
1059 template<class I>
1060    void internal_assign_range(I first, I last, is_integer_tag<true> *) {
1061        internal_assign_n(static_cast<size_type>(first), &static_cast<T&>(last));
1062    }
1063    //! inline proxy assign by iterators
1064    template<class I>
1065    void internal_assign_range(I first, I last, is_integer_tag<false> *) {
1066        internal_assign_iterators(first, last);
1067    }
1068    //! assign by iterators
1069    template<class I>
1070    void internal_assign_iterators(I first, I last);
1071
1072 //these functions are marked __TBB_EXPORTED_FUNC as they are called from within the library
1073
1075 static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n );
1076
1078 static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n );
1079
1081 static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n );
1082
1083#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1085 static void __TBB_EXPORTED_FUNC move_array_if_noexcept( void* dst, const void* src, size_type n );
1086#endif //__TBB_MOVE_IF_NO_EXCEPT_PRESENT
1087
1088#if __TBB_CPP11_RVALUE_REF_PRESENT
1090 static void __TBB_EXPORTED_FUNC move_array( void* dst, const void* src, size_type n );
1091
1093 static void __TBB_EXPORTED_FUNC move_assign_array( void* dst, const void* src, size_type n );
1094#endif
1096 template<typename Iterator>
1097 static void __TBB_EXPORTED_FUNC copy_range( void* dst, const void* p_type_erased_iterator, size_type n );
1098
1100 static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n );
1101
1104
1106    class internal_loop_guide : internal::no_copy {
1107    public:
1108        const pointer array;
1109        const size_type n;
1110        size_type i;
1111
1112 static const T* as_const_pointer(const void *ptr) { return static_cast<const T *>(ptr); }
1113 static T* as_pointer(const void *src) { return static_cast<T*>(const_cast<void *>(src)); }
1114
1115 internal_loop_guide(size_type ntrials, void *ptr)
1116 : array(as_pointer(ptr)), n(ntrials), i(0) {}
1117 void init() { for(; i < n; ++i) new( &array[i] ) T(); }
1118 void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*as_const_pointer(src)); }
1119 void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(as_const_pointer(src)[i]); }
1120 void assign(const void *src) { for(; i < n; ++i) array[i] = as_const_pointer(src)[i]; }
1121#if __TBB_CPP11_RVALUE_REF_PRESENT
1122 void move_assign(const void *src) { for(; i < n; ++i) array[i] = std::move(as_pointer(src)[i]); }
1123 void move_construct(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move(as_pointer(src)[i]) ); }
1124#endif
1125#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1126 void move_construct_if_noexcept(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move_if_noexcept(as_pointer(src)[i]) ); }
1127#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
1128
1129 //TODO: rename to construct_range
1130 template<class I> void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); }
1131        ~internal_loop_guide() {
1132            if(i < n) {// if an exception was raised, fill the rest of the items with zeros
1133                internal::handle_unconstructed_elements(array+i, n-i);
1134            }
1135 }
1136 };
1137
1138    struct push_back_helper : internal::no_copy{
1139        struct element_construction_guard : internal::no_copy{
1140            pointer element;
1141
1142            element_construction_guard(pointer an_element) : element (an_element){}
1143            void dismiss(){ element = NULL; }
1144            ~element_construction_guard(){
1145                if (element){
1146                    internal::handle_unconstructed_elements(element, 1);
1147                }
1148 }
1149 };
1150
1154
1151        concurrent_vector & v;
1152        size_type k;
1153        element_construction_guard g;
1155        push_back_helper(concurrent_vector & vector) :
1156            v(vector),
1157            g (static_cast<T*>(v.internal_push_back(sizeof(T),k)))
1158        {}
1160        pointer internal_push_back_result(){ return g.element;}
1161        iterator return_iterator_and_dismiss(){
1162 pointer ptr = g.element;
1163 g.dismiss();
1164 return iterator(v, k, ptr);
1165 }
1166 };
1167};
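// A brief usage sketch of the growth interface documented above (push_back, grow_by,
// grow_to_at_least, operator[]); this is a hypothetical helper for illustration only,
// not part of the header's interface.
inline void concurrent_vector_usage_sketch() {
    concurrent_vector<int> v;
    v.push_back( 1 );                                      // append a single element; safe to call concurrently
    concurrent_vector<int>::iterator it = v.grow_by( 5 );  // append 5 default-constructed elements
    for( int i = 0; i < 5; ++i, ++it )
        *it = i;                                           // initialize exactly the elements this call added
    v.grow_to_at_least( 100 );                             // append elements until size() >= 100
    v[0] = 42;                                             // unchecked element access
}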
1168
1169#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1170// Deduction guide for the constructor from two iterators
1171template<typename I,
1172 typename T = typename std::iterator_traits<I>::value_type,
1173 typename A = cache_aligned_allocator<T>
1174> concurrent_vector(I, I, const A& = A())
1175-> concurrent_vector<T, A>;
1176
1177// Deduction guide for the constructor from a vector and allocator
1178template<typename T, typename A1, typename A2>
1179concurrent_vector(const concurrent_vector<T, A1> &, const A2 &)
1180-> concurrent_vector<T, A2>;
1181
1182// Deduction guide for the constructor from an initializer_list
1183template<typename T, typename A = cache_aligned_allocator<T>
1184> concurrent_vector(std::initializer_list<T>, const A& = A())
1185-> concurrent_vector<T, A>;
1186#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
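// A small sketch of the C++17 deduction guides above: the element (and allocator) type is
// deduced from the constructor arguments. This hypothetical helper compiles only when the
// guides are available.
#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
inline void deduction_guides_sketch() {
    concurrent_vector v1{ 1, 2, 3 };               // deduced as concurrent_vector<int>
    concurrent_vector v2( v1.begin(), v1.end() );  // element type taken from the iterator's value_type
    (void)v2;
}
#endif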
1187
1188#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1189#pragma warning (push)
1190#pragma warning (disable: 4701) // potentially uninitialized local variable "old"
1191#endif
1192template<typename T, class A>
1193void concurrent_vector<T, A>::shrink_to_fit() {
1194    internal_segments_table old;
1195    __TBB_TRY {
1196 internal_array_op2 copy_or_move_array =
1197#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1198 &move_array_if_noexcept
1199#else
1200 &copy_array
1201#endif
1202 ;
1203 if( internal_compact( sizeof(T), &old, &destroy_array, copy_or_move_array ) )
1204 internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments
1205 } __TBB_CATCH(...) {
1206        if( old.first_block ) // free the segment allocated for compacting; needed only to support exceptions thrown by the constructor of the user type T
1207 internal_free_segments( old.table, 1, old.first_block );
1208 __TBB_RETHROW();
1209 }
1210}
1211#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1212#pragma warning (pop)
1213#endif // warning 4701 is back
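// A short sketch of the intended (single-threaded) use of shrink_to_fit() defined above:
// it consolidates segments and releases unused memory, and must not overlap with any
// concurrent operation on the same vector. The helper is hypothetical, for illustration.
inline void shrink_to_fit_sketch( concurrent_vector<int>& v ) {
    v.reserve( 1000 );     // capacity is spread over several power-of-two segments
    v.grow_by( 10 );       // only a small prefix is actually used
    v.shrink_to_fit();     // compacts the segments; not thread-safe
}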
1214
1215template<typename T, class A>
1216void concurrent_vector<T, A>::internal_free_segments( segment_t table[], segment_index_t k, segment_index_t first_block ) {
1217    // Free the arrays
1218 while( k > first_block ) {
1219 --k;
1220 segment_value_t segment_value = table[k].load<relaxed>();
1221 table[k].store<relaxed>(segment_not_used());
1222 if( segment_value == segment_allocated() ) // check for correct segment pointer
1223 this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(k) );
1224 }
1225 segment_value_t segment_value = table[0].load<relaxed>();
1226 if( segment_value == segment_allocated() ) {
1227 __TBB_ASSERT( first_block > 0, NULL );
1228 while(k > 0) table[--k].store<relaxed>(segment_not_used());
1229 this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(first_block) );
1230 }
1231}
1232
1233template<typename T, class A>
1234T& concurrent_vector<T, A>::internal_subscript( size_type index ) const {
1235    //TODO: unify both versions of internal_subscript
1236 __TBB_ASSERT( index < my_early_size, "index out of bounds" );
1237 size_type j = index;
1238 segment_index_t k = segment_base_index_of( j );
1239 __TBB_ASSERT( my_segment.load<acquire>() != my_storage || k < pointers_per_short_table, "index is being allocated" );
1240 //no need in load with acquire (load<acquire>) since thread works in own space or gets
1241 //the information about added elements via some form of external synchronization
1242 //TODO: why not make a load of my_segment relaxed as well ?
1243 //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
1244 segment_value_t segment_value = my_segment[k].template load<relaxed>();
1245 __TBB_ASSERT( segment_value != segment_allocation_failed(), "the instance is broken by bad allocation. Use at() instead" );
1246 __TBB_ASSERT( segment_value != segment_not_used(), "index is being allocated" );
1247 return (( segment_value.pointer<T>()))[j];
1248}
1249
1250template<typename T, class A>
1251T& concurrent_vector<T, A>::internal_subscript_with_exceptions( size_type index ) const {
1252    if( index >= my_early_size )
1253 internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range
1254 size_type j = index;
1255 segment_index_t k = segment_base_index_of( j );
1256 //TODO: refactor this condition into separate helper function, e.g. fits_into_small_table
1257    if( my_segment.load<acquire>() == my_storage && k >= pointers_per_short_table )
1258        internal::throw_exception(internal::eid_segment_range_error); // throw std::range_error
1259 // no need in load with acquire (load<acquire>) since thread works in own space or gets
1260 //the information about added elements via some form of external synchronization
1261 //TODO: why not make a load of my_segment relaxed as well ?
1262 //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
1263 segment_value_t segment_value = my_segment[k].template load<relaxed>();
1264 enforce_segment_allocated(segment_value, internal::eid_index_range_error);
1265 return (segment_value.pointer<T>())[j];
1266}
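// A short sketch of the two element-access paths defined above: operator[] goes through
// internal_subscript() and only asserts in debug builds, while at() goes through
// internal_subscript_with_exceptions() and reports errors by throwing. The helper below is
// a hypothetical illustration.
inline void element_access_sketch( concurrent_vector<int>& v ) {
    if( !v.empty() )
        v[0] = 1;              // unchecked access; index must be < size()
    __TBB_TRY {
        v.at( v.size() ) = 2;  // out of range: at() throws
    } __TBB_CATCH(...) {
        // expected for an out-of-range index
    }
}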
1267
1268template<typename T, class A> template<class I>
1269void concurrent_vector<T, A>::internal_assign_iterators(I first, I last) {
1270    __TBB_ASSERT(my_early_size == 0, NULL);
1271 size_type n = std::distance(first, last);
1272 if( !n ) return;
1273 internal_reserve(n, sizeof(T), max_size());
1274 my_early_size = n;
1275 segment_index_t k = 0;
1276 //TODO: unify segment iteration code with concurrent_base_v3::helper
1277 size_type sz = segment_size( my_first_block );
1278 while( sz < n ) {
1279 internal_loop_guide loop(sz, my_segment[k].template load<relaxed>().template pointer<void>());
1280 loop.iterate(first);
1281 n -= sz;
1282 if( !k ) k = my_first_block;
1283 else { ++k; sz <<= 1; }
1284 }
1285 internal_loop_guide loop(n, my_segment[k].template load<relaxed>().template pointer<void>());
1286 loop.iterate(first);
1287}
1288
1289template<typename T, class A>
1290void concurrent_vector<T, A>::initialize_array( void* begin, const void *, size_type n ) {
1291 internal_loop_guide loop(n, begin); loop.init();
1292}
1293
1294template<typename T, class A>
1295void concurrent_vector<T, A>::initialize_array_by( void* begin, const void *src, size_type n ) {
1296 internal_loop_guide loop(n, begin); loop.init(src);
1297}
1298
1299template<typename T, class A>
1300void concurrent_vector<T, A>::copy_array( void* dst, const void* src, size_type n ) {
1301 internal_loop_guide loop(n, dst); loop.copy(src);
1302}
1303
1304#if __TBB_CPP11_RVALUE_REF_PRESENT
1305template<typename T, class A>
1306void concurrent_vector<T, A>::move_array( void* dst, const void* src, size_type n ) {
1307 internal_loop_guide loop(n, dst); loop.move_construct(src);
1308}
1309template<typename T, class A>
1310void concurrent_vector<T, A>::move_assign_array( void* dst, const void* src, size_type n ) {
1311 internal_loop_guide loop(n, dst); loop.move_assign(src);
1312}
1313#endif
1314
1315#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1316template<typename T, class A>
1317void concurrent_vector<T, A>::move_array_if_noexcept( void* dst, const void* src, size_type n ) {
1318 internal_loop_guide loop(n, dst); loop.move_construct_if_noexcept(src);
1319}
1320#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
1321
1322template<typename T, class A>
1323template<typename I>
1324void concurrent_vector<T, A>::copy_range( void* dst, const void* p_type_erased_iterator, size_type n ){
1325 internal_loop_guide loop(n, dst);
1326 loop.iterate( *(static_cast<I*>(const_cast<void*>(p_type_erased_iterator))) );
1327}
1328
1329template<typename T, class A>
1330void concurrent_vector<T, A>::assign_array( void* dst, const void* src, size_type n ) {
1331 internal_loop_guide loop(n, dst); loop.assign(src);
1332}
1333
1334#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1335 // Workaround for overzealous compiler warning
1336 #pragma warning (push)
1337 #pragma warning (disable: 4189)
1338#endif
1339template<typename T, class A>
1340void concurrent_vector<T, A>::destroy_array( void* begin, size_type n ) {
1341    T* array = static_cast<T*>(begin);
1342 for( size_type j=n; j>0; --j )
1343 array[j-1].~T(); // destructors are supposed to not throw any exceptions
1344}
1345#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1346 #pragma warning (pop)
1347#endif // warning 4189 is back
1348
1349// concurrent_vector's template functions
1350template<typename T, class A1, class A2>
1351inline bool operator==(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b) {
1352    //TODO: call size() only once per vector (in operator==)
1353    // Simply: return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
1354    if(a.size() != b.size()) return false;
1355    typename concurrent_vector<T, A1>::const_iterator i(a.begin());
1356    typename concurrent_vector<T, A2>::const_iterator j(b.begin());
1357 for(; i != a.end(); ++i, ++j)
1358 if( !(*i == *j) ) return false;
1359 return true;
1360}
1361
1362template<typename T, class A1, class A2>
1363inline bool operator!=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1364{ return !(a == b); }
1365
1366template<typename T, class A1, class A2>
1367inline bool operator<(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1368{ return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); }
1369
1370template<typename T, class A1, class A2>
1371inline bool operator>(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1372{ return b < a; }
1373
1374template<typename T, class A1, class A2>
1375inline bool operator<=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1376{ return !(b < a); }
1377
1378template<typename T, class A1, class A2>
1379inline bool operator>=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1380{ return !(a < b); }
1381
1382template<typename T, class A>
1383inline void swap(concurrent_vector<T, A> &a, concurrent_vector<T, A> &b)
1384{ a.swap( b ); }
1385
1386} // namespace tbb
1387
1388#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1389 #pragma warning (pop)
1390#endif // warning 4267,4127 are back
1391
1392
1393#undef __TBB_concurrent_vector_H_include_area
1395
1396#endif /* __TBB_concurrent_vector_H */
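
A minimal, self-contained usage sketch follows: it fills a tbb::concurrent_vector from a parallel loop via push_back and then reads the elements back sequentially. It assumes the TBB headers are on the include path, the program is linked against the TBB runtime, and a C++11 (or later) compiler is used.

#include "tbb/concurrent_vector.h"
#include "tbb/parallel_for.h"
#include <cstdio>

int main() {
    tbb::concurrent_vector<int> v;

    // Many threads may call push_back (or grow_by) on the same vector concurrently;
    // each call returns an iterator to the element(s) it appended.
    tbb::parallel_for( 0, 1000, [&v]( int i ) {
        v.push_back( i );
    } );

    // Once all concurrent growth has finished, ordinary iteration is safe.
    long long sum = 0;
    for( tbb::concurrent_vector<int>::const_iterator it = v.begin(); it != v.end(); ++it )
        sum += *it;

    std::printf( "size=%u sum=%lld\n", unsigned(v.size()), sum );
    return 0;
}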