Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
Loading...
Searching...
No Matches
task.cpp
Go to the documentation of this file.
1/*
2 Copyright (c) 2005-2020 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15*/
16
17// Do not include task.h directly. Use scheduler_common.h instead
18#include "scheduler_common.h"
19#include "governor.h"
20#include "scheduler.h"
21#include "itt_notify.h"
22
24#include "tbb/partitioner.h"
25
26#include <new>
27
28namespace tbb {
29
30namespace internal {
31
32//------------------------------------------------------------------------
33// Methods of allocate_root_proxy
34//------------------------------------------------------------------------
37 __TBB_ASSERT( v, "thread did not activate a task_scheduler_init object?" );
38#if __TBB_TASK_GROUP_CONTEXT
40
41 ITT_STACK_CREATE(p.context->itt_caller);
42#endif
43 // New root task becomes part of the currently running task's cancellation context
44 return v->allocate_task( size, __TBB_CONTEXT_ARG(NULL, p.context) );
45}
46
49 __TBB_ASSERT( v, "thread does not have initialized task_scheduler_init object?" );
50#if __TBB_TASK_GROUP_CONTEXT
51 // No need to do anything here as long as there is no context -> task connection
52#endif /* __TBB_TASK_GROUP_CONTEXT */
54}
55
56#if __TBB_TASK_GROUP_CONTEXT
57//------------------------------------------------------------------------
58// Methods of allocate_root_with_context_proxy
59//------------------------------------------------------------------------
62 __TBB_ASSERT( s, "Scheduler auto-initialization failed?" );
63 __TBB_ASSERT( &my_context, "allocate_root(context) argument is a dereferenced NULL pointer" );
64 task& t = s->allocate_task( size, NULL, &my_context );
65 // Supported usage model prohibits concurrent initial binding. Thus we do not
66 // need interlocked operations or fences to manipulate my_context.my_kind
68 // If we are in the outermost task dispatch loop of a master thread, then
69 // there is nothing to bind this context to, and we skip the binding part
70 // treating the context as isolated.
71 if ( s->master_outermost_level() )
73 else
75 }
76#if __TBB_FP_CONTEXT
79 my_context.copy_fp_settings( *s->default_context() );
80#endif
82 return t;
83}
84
85void allocate_root_with_context_proxy::free( task& task ) const {
86 internal::generic_scheduler* v = governor::local_scheduler_weak();
87 __TBB_ASSERT( v, "thread does not have initialized task_scheduler_init object?" );
88 // No need to do anything here as long as unbinding is performed by context destructor only.
89 v->free_task<local_task>( task );
90}
91#endif /* __TBB_TASK_GROUP_CONTEXT */
92
93//------------------------------------------------------------------------
94// Methods of allocate_continuation_proxy
95//------------------------------------------------------------------------
97 task* t = (task*)this;
100 task* parent = t->parent();
101 t->prefix().parent = NULL;
102 return s->allocate_task( size, __TBB_CONTEXT_ARG(parent, t->prefix().context) );
103}
104
106 // Restore the parent as it was before the corresponding allocate was called.
107 ((task*)this)->prefix().parent = mytask.parent();
109}
110
111//------------------------------------------------------------------------
112// Methods of allocate_child_proxy
113//------------------------------------------------------------------------
115 task* t = (task*)this;
118 return s->allocate_task( size, __TBB_CONTEXT_ARG(t, t->prefix().context) );
119}
120
121void allocate_child_proxy::free( task& mytask ) const {
123}
124
125//------------------------------------------------------------------------
126// Methods of allocate_additional_child_of_proxy
127//------------------------------------------------------------------------
131 return s->allocate_task( size, __TBB_CONTEXT_ARG(&parent, parent.prefix().context) );
132}
133
135 // Undo the increment. We do not check the result of the fetch-and-decrement.
136 // We could consider spawning the task if the fetch-and-decrement returns 1.
137 // But we do not know that was the programmer's intention.
138 // Furthermore, if it was the programmer's intention, the program has a fundamental
139 // race condition (that we warn about in Reference manual), because the
140 // reference count might have become zero before the corresponding call to
141 // allocate_additional_child_of_proxy::allocate.
144}
145
146//------------------------------------------------------------------------
147// Support for auto_partitioner
148//------------------------------------------------------------------------
150 const size_t X_FACTOR = 4;
151 return X_FACTOR * governor::local_scheduler()->max_threads_in_arena();
152}
153
154//------------------------------------------------------------------------
155// Methods of affinity_partitioner_base_v3
156//------------------------------------------------------------------------
158 // Check factor to avoid asking for number of workers while there might be no arena.
159 size_t new_size = factor ? factor*governor::local_scheduler()->max_threads_in_arena() : 0;
160 if( new_size!=my_size ) {
161 if( my_array ) {
163 // Following two assignments must be done here for sake of exception safety.
164 my_array = NULL;
165 my_size = 0;
166 }
167 if( new_size ) {
168 my_array = static_cast<affinity_id*>(NFS_Allocate(new_size,sizeof(affinity_id), NULL ));
169 memset( my_array, 0, sizeof(affinity_id)*new_size );
171 }
172 }
173}
174
175} // namespace internal
176
177using namespace tbb::internal;
178
179//------------------------------------------------------------------------
180// task
181//------------------------------------------------------------------------
182
184 __TBB_ASSERT( count>=0, "count must not be negative" );
185 task_prefix &p = prefix();
186 __TBB_ASSERT(p.ref_count==1 && p.state==allocated && self().parent()==this
187 || !(p.extra_state & es_ref_count_active), "ref_count race detected");
188 ITT_NOTIFY(sync_releasing, &p.ref_count);
189 p.ref_count = count;
190}
191
195 __TBB_ASSERT( k>=1, "task's reference count underflowed" );
196 if( k==1 )
197 ITT_NOTIFY( sync_acquired, &prefix().ref_count );
198 return k-1;
199}
200
205 return *v->my_innermost_running_task;
206}
207
209 return true;
210}
211
213 // 1 may be a guard reference for wait_for_all, which was not reset because
214 // of concurrent_wait mode or because prepared root task was not actually used
215 // for spawning tasks (as in structured_task_group).
216 __TBB_ASSERT( (intptr_t)victim.prefix().ref_count <= 1, "Task being destroyed must not have children" );
217 __TBB_ASSERT( victim.state()==task::allocated, "illegal state for victim task" );
218 task* parent = victim.parent();
219 victim.~task();
220 if( parent ) {
221 __TBB_ASSERT( parent->state()!=task::freed && parent->state()!=task::ready,
222 "attempt to destroy child of running or corrupted parent?" );
223 // 'reexecute' and 'executing' are also signs of a race condition, since most tasks
224 // set their ref_count upon entry but "es_ref_count_active" should detect this
225 parent->internal_decrement_ref_count();
226 // Even if the last reference to *parent is removed, it should not be spawned (documented behavior).
227 }
229}
230
233 task* t = list.first;
234 if( t ) {
235 if( &t->prefix().next!=list.next_ptr )
236 s->local_spawn( t->prefix().next, *list.next_ptr );
237 list.clear();
238 }
239 s->local_wait_for_all( *this, t );
240}
241
246}
247
248#if __TBB_TASK_GROUP_CONTEXT
250 prefix().context = &ctx;
253 // If we are in the outermost task dispatch loop of a master thread, then
254 // there is nothing to bind this context to, and we skip the binding part
255 // treating the context as isolated.
256 if ( s->master_outermost_level() )
258 else
259 ctx.bind_to( s );
260 }
261#if __TBB_FP_CONTEXT
264 ctx.copy_fp_settings( *s->default_context() );
265#endif
267}
268#endif /* __TBB_TASK_GROUP_CONTEXT */
269
270} // namespace tbb
271
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition tbb_stddef.h:165
#define __TBB_FetchAndDecrementWrelease(P)
void const char const char int ITT_FORMAT __itt_group_sync s
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t new_size
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task * task
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t count
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id parent
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t size
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p sync_releasing
void const char const char int ITT_FORMAT __itt_group_sync p
#define __TBB_CONTEXT_ARG(arg1, context)
#define ITT_STACK_CREATE(obj)
Definition itt_notify.h:117
#define ITT_NOTIFY(name, obj)
Definition itt_notify.h:112
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
The graph class.
Identifiers declared inside namespace internal should never be used directly by client code.
Definition atomic.h:65
void __TBB_store_relaxed(volatile T &location, V value)
intptr_t reference_count
A reference count.
Definition task.h:131
void assert_task_valid(const task *)
unsigned short affinity_id
An id as used for specifying affinity.
Definition task.h:139
@ es_ref_count_active
Set if ref_count might be changed by another thread. Used for debugging.
size_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor()
Definition task.cpp:149
T __TBB_load_relaxed(const volatile T &location)
@ no_cache
Disable caching for a small task.
@ local_task
Task is known to have been allocated by this scheduler.
size_t my_size
Number of elements in my_array.
Definition partitioner.h:87
void __TBB_EXPORTED_METHOD resize(unsigned factor)
Resize my_array.
Definition task.cpp:157
affinity_id * my_array
Array that remembers affinities of tree positions to affinity_id.
Definition partitioner.h:85
void __TBB_EXPORTED_METHOD free(task &) const
Definition task.cpp:134
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Definition task.cpp:128
static void __TBB_EXPORTED_FUNC destroy(task &victim)
Destroy a task.
Definition task.cpp:212
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
void __TBB_EXPORTED_METHOD free(task &) const
static void __TBB_EXPORTED_FUNC free(task &)
Definition task.cpp:47
static task &__TBB_EXPORTED_FUNC allocate(size_t size)
Definition task.cpp:35
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Definition task.cpp:96
void __TBB_EXPORTED_METHOD free(task &) const
Definition task.cpp:105
void __TBB_EXPORTED_METHOD free(task &) const
Definition task.cpp:121
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Definition task.cpp:114
Memory prefix to a task object.
Definition task.h:203
tbb::task * next
"next" field for list of task
Definition task.h:297
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
Definition task.h:274
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
Definition task.h:230
tbb::task * parent
The task whose reference count includes me.
Definition task.h:267
Used to form groups of tasks.
Definition task.h:358
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
Definition task.h:405
void copy_fp_settings(const task_group_context &src)
Copies FPU control setting from another context.
__itt_caller itt_caller
Used to set and maintain stack stitching point for Intel Performance Tools.
Definition task.h:418
void bind_to(internal::generic_scheduler *local_sched)
Registers this context with the local scheduler and binds it to its parent context.
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
Definition task.h:446
static const kind_type binding_required
Definition task.h:589
Base class for user-defined tasks.
Definition task.h:615
void __TBB_EXPORTED_METHOD change_group(task_group_context &ctx)
Moves this task from its current group into another one.
virtual void __TBB_EXPORTED_METHOD note_affinity(affinity_id id)
Invoked by scheduler to notify task that it ran on unexpected thread.
Definition task.cpp:245
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
Definition task.h:865
@ allocated
task object is freshly allocated or recycled.
Definition task.h:643
@ ready
task is in ready pool, or is going to be put there, or was just taken off.
Definition task.h:641
@ freed
task object is on free list, or is going to be put there, or was just taken off.
Definition task.h:645
state_type state() const
Current execution state.
Definition task.h:912
int ref_count() const
The internal reference count.
Definition task.h:915
internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count()
Decrement reference count and return its new value.
Definition task.cpp:192
static task &__TBB_EXPORTED_FUNC self()
The innermost task being executed or destroyed by the current thread at the moment.
Definition task.cpp:201
void spawn_and_wait_for_all(task &child)
Similar to spawn followed by wait_for_all, but more efficient.
Definition task.h:800
bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const
Obsolete, and only retained for the sake of backward compatibility. Always returns true.
Definition task.cpp:208
virtual ~task()
Destructor.
Definition task.h:629
void increment_ref_count()
Atomically increment reference count.
Definition task.h:771
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
Definition task.h:1002
internal::affinity_id affinity_id
An id as used for specifying affinity.
Definition task.h:940
void __TBB_EXPORTED_METHOD internal_set_ref_count(int count)
Set reference count.
Definition task.cpp:183
A list of children.
Definition task.h:1077
task ** next_ptr
Definition task.h:1080
task * first
Definition task.h:1079
void clear()
Clear the list.
Definition task.h:1121
static generic_scheduler * local_scheduler_weak()
Definition governor.h:134
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
Definition governor.h:129
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
Definition scheduler.h:88
Work stealing task scheduler.
Definition scheduler.h:140
void free_task(task &t)
Put task on free list.
Definition scheduler.h:730
task & allocate_task(size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
Allocate task object, either from the heap or a free list.
unsigned max_threads_in_arena()
Returns the concurrency limit of the current arena.
Definition scheduler.h:677

Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.