#ifndef _TBB_scheduler_H
#define _TBB_scheduler_H

#include "../rml/include/rml_tbb.h"

#if __TBB_SURVIVE_THREAD_SWITCH
#include "cilk-tbb-interop.h"
#endif /* __TBB_SURVIVE_THREAD_SWITCH */

#if __TBB_PREVIEW_RESUMABLE_TASKS
#include "co_context.h"
#endif

//! A scheduler with a customized evaluation loop.
template<typename SchedulerTraits> class custom_scheduler;

//------------------------------------------------------------------------
// generic_scheduler
//------------------------------------------------------------------------

//! Sentinel value of an arena slot: no task pool is published there.
#define EmptyTaskPool ((task**)0)
//! Sentinel value of an arena slot: the task pool is locked by its owner or by a thief.
#define LockedTaskPool ((task**)~(intptr_t)0)
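/* Editor's note: the two sentinels above let a single pointer field in an arena
   slot encode three states of a task pool: not published (EmptyTaskPool), locked
   (LockedTaskPool), or a live deque pointer. A minimal sketch of the locking
   idea, using std::atomic for illustration only (TBB uses its own atomics and
   back-off spinning):

       #include <atomic>
       #include <cstdint>

       std::atomic<task**> slot_pool;   // hypothetical arena slot field

       task** lock_task_pool_sketch() {
           for (;;) {
               task** pool = slot_pool.exchange((task**)~(std::intptr_t)0);
               if (pool != (task**)~(std::intptr_t)0)
                   return pool;         // lock acquired; may be EmptyTaskPool
               // Another thread holds the lock; spin and retry.
           }
       }

       void unlock_task_pool_sketch(task** pool) {
           slot_pool.store(pool);       // republish the previous state
       }
*/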
//! Bit-field representing properties of a scheduler.
struct scheduler_properties {
    static const bool worker = false;
    static const bool master = true;
    //! Indicates that a scheduler acts as a master or a worker.
    bool type : 1;
    //! Indicates that a scheduler is on the outermost dispatch level.
    bool outermost : 1;
#if __TBB_PREVIEW_CRITICAL_TASKS
    //! Indicates that a scheduler is in the process of executing a critical task.
    bool has_taken_critical_task : 1;
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
    //! Indicates that the scheduler is bound to an original thread stack.
    bool genuine : 1;
#endif
    //! Reserved bits.
#if __TBB_PREVIEW_RESUMABLE_TASKS
    unsigned char : 4;
#elif __TBB_PREVIEW_CRITICAL_TASKS
    unsigned char : 5;
#else
    unsigned char : 6;
#endif
};

//! State shared between a scheduler and the arena slot it occupies.
struct scheduler_state {
    //! Index of the arena slot the scheduler occupies now, or occupied last time.
    size_t my_arena_index;

    //! Pointer to the slot in the arena we own at the moment.
    arena_slot* my_arena_slot;

    //! The arena that I own (if master) or am servicing at the moment (if worker).
    arena* my_arena;

    //! Innermost task whose task::execute() is running; a dummy task on the outermost level.
    task* my_innermost_running_task;

    mail_inbox my_inbox;

    //! The mailbox id assigned to this scheduler.
    affinity_id my_affinity_id;

    scheduler_properties my_properties;

#if __TBB_SCHEDULER_OBSERVER
    //! Last observer in the global observer list processed by this scheduler.
    observer_proxy* my_last_global_observer;
#endif

#if __TBB_ARENA_OBSERVER
    //! Last observer in the arena-local observer list processed by this scheduler.
    observer_proxy* my_last_local_observer;
#endif

#if __TBB_TASK_PRIORITY
    //! Latest known highest priority of tasks in the market or arena.
    /** Points to the market's value for workers, and to the arena's for masters. **/
    volatile intptr_t *my_ref_top_priority;

    //! Pointer to the market's (for workers) or the arena's (for masters) reload epoch.
    volatile uintptr_t *my_ref_reload_epoch;
#endif /* __TBB_TASK_PRIORITY */

#if __TBB_PREVIEW_RESUMABLE_TASKS
    //! The task this scheduler is currently waiting for (NULL when not waiting).
    task* my_wait_task;

    //! Flag raised when the suspended scheduler is recalled to its original thread.
    tbb::atomic<bool>* my_current_is_recalled;
#endif
};

//! Work stealing task scheduler.
class generic_scheduler: public scheduler,
                         public ::rml::job,
                         public intrusive_list_node,
                         public scheduler_state {

    //! If sizeof(task) is <= quick_task_size, it is handled on a free list instead of malloc'd.
    static const size_t quick_task_size = 256 - task_prefix_reservation_size;

    /* ... */

#if __TBB_PREVIEW_CRITICAL_TASKS
    /* ... */
#endif /* __TBB_PREVIEW_CRITICAL_TASKS */

    //! Position in the call stack specifying its maximal filling when stealing is still allowed.
    uintptr_t my_stealing_threshold;
#if __TBB_ipf
    //! The analogous threshold for the register stack engine's backing store (IA-64).
    uintptr_t my_rsb_stealing_threshold;
#endif
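/* Editor's note: my_stealing_threshold drives the stealing-limiting heuristic
   set up by init_stack_info(): a thread that is already deep in its own call
   stack must not steal, because executing stolen work there could overflow the
   stack. A hypothetical form of the check, assuming a downward-growing stack
   (the real can_steal() also consults my_rsb_stealing_threshold on IA-64):

       #include <cstdint>

       bool can_steal_sketch(std::uintptr_t stealing_threshold) {
           int anchor;   // the address approximates the current stack pointer
           return reinterpret_cast<std::uintptr_t>(&anchor) > stealing_threshold;
       }
*/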
    //! The market I am in.
    market* my_market;

    //! Random number generator used for picking a random victim from which to steal.
    FastRandom my_random;

    //! Free list of small tasks that can be reused.
    task* my_free_list;

#if __TBB_HOARD_NONLOCAL_TASKS
    //! Free list of small tasks allocated by other schedulers, kept for reuse or return.
    task* my_nonlocal_free_list;
#endif

    //! Fake root task created by slave threads.
    /** The task is used as the "parent" argument in calls to wait_for_all. **/
    task* my_dummy_task;

    //! Reference count for scheduler.
    long my_ref_count;

    //! True if *this was created by automatic TBB initialization.
    bool my_auto_initialized;

#if __TBB_COUNT_TASK_NODES
    //! Net number of task objects that have been allocated by this scheduler but not yet freed.
    intptr_t my_task_node_count;
#endif /* __TBB_COUNT_TASK_NODES */
#if __TBB_PREVIEW_RESUMABLE_TASKS
    //! The list of possible post resume actions.
    enum post_resume_action {
        PRA_INVALID,
        PRA_ABANDON,
        PRA_CALLBACK,
        PRA_CLEANUP,
        PRA_NOTIFY,
        PRA_NONE
    };

    //! The suspend callback function type.
    typedef void(*suspend_callback_t)(void*, task::suspend_point);

    //! Type-erased wrapper for the user callback passed to tbb::task::suspend.
    struct callback_t {
        suspend_callback_t suspend_callback;
        void* user_callback;
        task::suspend_point tag;

        void operator()() {
            if (suspend_callback) {
                __TBB_ASSERT(suspend_callback && user_callback && tag, NULL);
                suspend_callback(user_callback, tag);
            }
        }
    };

    //! The coroutine context associated with this scheduler.
    co_context my_co_context;

    //! The action to perform right after the next context switch back to this scheduler.
    post_resume_action my_post_resume_action;

    //! The argument of the post resume action.
    void* my_post_resume_arg;

    //! The scheduler to resume on exit from the dispatch loop.
    generic_scheduler* my_target_on_exit;

    //! Sets the post resume action and its argument for this scheduler.
    void set_post_resume_action(post_resume_action, void* arg);

    //! Performs the post resume action set for this scheduler.
    void do_post_resume_action();

    //! Decides how the switch to the target is done and sets its post resume action.
    /** Returns true if the switch may be performed immediately, false if this
        coroutine must finish first and resume the target on exit. **/
    bool prepare_resume(generic_scheduler& target);

    //! Resumes the original scheduler of the thread that owns this coroutine.
    /** Returns false if the resume must be performed from a different stack. **/
    bool resume_original_scheduler();

    //! Resumes the target scheduler; prepare_resume must have been called for it beforehand.
    void resume(generic_scheduler& target);

    friend void recall_function(task::suspend_point tag);
#endif /* __TBB_PREVIEW_RESUMABLE_TASKS */
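/* Editor's note: callback_t above erases the type of the functor a user passes
   to tbb::task::suspend. A hypothetical trampoline showing how its three
   fields cooperate (the exact wrapper in TBB may differ):

       template <typename F>
       void suspend_trampoline_sketch(void* user_callback, task::suspend_point tag) {
           (*static_cast<F*>(user_callback))(tag);  // invoke the user functor once
       }

       // For a functor f of type F, the scheduler would then execute
       //     callback_t cb = { &suspend_trampoline_sketch<F>, &f, tag };
       //     cb();   // calls f(tag) at the suspend point
*/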
    /* ... */

#if __TBB_TASK_ISOLATION
    /* ... */
#endif /* __TBB_TASK_ISOLATION */

#if __TBB_PREVIEW_CRITICAL_TASKS
    //! Checks whether the task must be executed as a critical one and, if so, routes it accordingly.
    bool handled_as_critical( task& t );
#endif

#if TBB_USE_ASSERT > 1
    //! Verifies that the local task pool is in a consistent state.
    void assert_task_pool_valid() const;
#else
    void assert_task_pool_valid() const {}
#endif /* TBB_USE_ASSERT > 1 */

    //! Put task on free list.
    /** Does not call destructor. **/
    template<free_task_hint h>
    void free_task( task& t );

#if __TBB_COUNT_TASK_NODES
    //! Returns the number of task objects allocated but not yet freed, optionally including those of the arena workers.
    intptr_t get_task_node_count( bool count_arena_workers = false );
#endif /* __TBB_COUNT_TASK_NODES */
#if __TBB_TASK_GROUP_CONTEXT
    //! Head of the thread-specific list of task group contexts.
    context_list_node_t my_context_list_head;

    /* ... */

    //! Last context state propagation epoch known to this thread.
    /** Used to filter out state change propagations that were already processed. **/
    uintptr_t my_context_state_propagation_epoch;

    //! Flag indicating that the owner thread is operating on its context list.
    /** Together with my_nonlocal_ctx_list_update, implements the synchronization
        protocol between the owner and the threads propagating state changes. **/
    tbb::atomic<uintptr_t> my_local_ctx_list_update;

#if __TBB_TASK_PRIORITY
    //! Returns the priority level this scheduler compares task priorities against.
    inline intptr_t effective_reference_priority () const;

    //! Head of the list of offloaded (lower priority) tasks.
    task* my_offloaded_tasks;

    //! Pointer to the link that terminates the offloaded task list.
    task** my_offloaded_task_list_tail_link;

    //! Value of the reload epoch this scheduler last synchronized with.
    uintptr_t my_local_reload_epoch;

    //! True if the local task pool may contain tasks that have to be rearranged by priority.
    volatile bool my_pool_reshuffling_pending;

    /* ... */

    //! Moves the task to the list of offloaded (lower priority) tasks.
    inline void offload_task ( task& t, intptr_t task_priority );
#endif /* __TBB_TASK_PRIORITY */

    //! Detaches the contexts registered in this scheduler's list (called at thread exit).
    void cleanup_local_context_list ();
    //! Propagates a state change (e.g. a cancellation request) through this scheduler's context list.
    template <typename T>
    void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );

    //! Verifies that the given context is alive and well-formed (debug builds).
    static void assert_context_valid ( const task_group_context* tgc ) {
        suppress_unused_warning( tgc );
#if TBB_USE_ASSERT
        __TBB_ASSERT( tgc, NULL );
        uintptr_t ctx = tgc->my_version_and_traits;
        __TBB_ASSERT( is_alive(ctx), "referenced task_group_context was destroyed" );
        static const char *msg = "task_group_context is invalid";
        /* ... further checks of the version-and-traits word elided ... */
#endif /* TBB_USE_ASSERT */
    }
#endif /* __TBB_TASK_GROUP_CONTEXT */
#if __TBB_TASK_PRIORITY
    /* ... */
#endif /* __TBB_TASK_PRIORITY */

#if TBB_USE_ASSERT > 1
    /* ... */
#endif /* TBB_USE_ASSERT > 1 */

#if _WIN32||_WIN64
private:
    //! Handle returned by RML when registering a master with the resource manager.
    ::rml::server::execution_resource_t master_exec_resource;
public:
#endif /* _WIN32||_WIN64 */

#if __TBB_TASK_GROUP_CONTEXT
    //! Flag indicating that a non-owner thread is modifying this scheduler's context list.
    /** Counterpart of my_local_ctx_list_update in the context list synchronization protocol. **/
    tbb::atomic<uintptr_t> my_nonlocal_ctx_list_update;
#endif /* __TBB_TASK_GROUP_CONTEXT */

#if __TBB_SURVIVE_THREAD_SWITCH
    __cilk_tbb_unwatch_thunk my_cilk_unwatch_thunk;
    //! State values used to implement the interoperability protocol with Cilk.
    enum cilk_state_t {
        cs_none = 0,
        cs_running,
        cs_limbo,
        cs_freed
    };
    cilk_state_t my_cilk_state;
#endif /* __TBB_SURVIVE_THREAD_SWITCH */

#if __TBB_STATISTICS
    //! Set of counters to track internal statistics on a per-thread basis.
    mutable statistics_counters my_counters;
#endif /* __TBB_STATISTICS */
    /* ... more members and methods elided ... */
}; // class generic_scheduler

#if __TBB_TASK_GROUP_CONTEXT
/* ... */
#endif /* __TBB_TASK_GROUP_CONTEXT */

//! Return task object to the memory allocator.
inline void generic_scheduler::deallocate_task( task& t ) {
#if TBB_USE_ASSERT
    task_prefix& p = t.prefix();
    p.state = 0xFF;
    p.extra_state = 0xFF;
    poison_pointer(p.next);
#endif /* TBB_USE_ASSERT */
    NFS_Free((char*)&t - task_prefix_reservation_size);
#if __TBB_COUNT_TASK_NODES
    --my_task_node_count;
#endif /* __TBB_COUNT_TASK_NODES */
}
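/* Editor's note: deallocate_task is the slow path of the small-task cache.
   allocate_task hands out objects of at most quick_task_size bytes from
   my_free_list and falls back to NFS_Allocate for bigger ones, so only the
   latter reach NFS_Free here. A sketch of the size test alone (simplified;
   the real allocator also initializes the task_prefix):

       bool cached_on_free_list_sketch(std::size_t number_of_bytes) {
           return number_of_bytes <= quick_task_size;  // small: recycle via my_free_list
       }
*/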
#if __TBB_COUNT_TASK_NODES
inline intptr_t generic_scheduler::get_task_node_count( bool count_arena_workers ) {
    return my_task_node_count + (count_arena_workers ? my_arena->workers_task_node_count() : 0);
}
#endif /* __TBB_COUNT_TASK_NODES */
//! Makes newly spawned tasks visible to thieves.
inline void generic_scheduler::commit_spawned_tasks( size_t new_tail ) {
    __TBB_ASSERT ( new_tail <= my_arena_slot->my_task_pool_size, "task deque end was overwritten" );
    // Emit the "task was released" signal.
    ITT_NOTIFY(sync_releasing, (void*)((uintptr_t)my_arena_slot + sizeof(uintptr_t)));
    // A release fence is necessary to make sure that previously stored task
    // pointers are visible to thieves.
    __TBB_store_with_release( my_arena_slot->tail, new_tail );
}

//! Makes relocated tasks visible to thieves and releases the local task pool.
inline void generic_scheduler::commit_relocated_tasks ( size_t new_tail ) {
    __TBB_ASSERT( is_local_task_pool_quiescent(),
                  "Task pool must be locked when calling commit_relocated_tasks()" );
    __TBB_store_relaxed( my_arena_slot->head, 0 );
    // Tail is updated last to minimize the probability that a thread taking an
    // arena snapshot sees this task pool as empty.
    __TBB_store_relaxed( my_arena_slot->tail, new_tail );
    release_task_pool();
}
729 template<free_task_h
int h
int>
731 #if __TBB_HOARD_NONLOCAL_TASKS 745 #if __TBB_PREVIEW_RESUMABLE_TASKS 755 }
else if( !(
h&
local_task) &&
p.origin && uintptr_t(
p.origin) < uintptr_t(4096) ) {
760 #if __TBB_HOARD_NONLOCAL_TASKS 762 p.next = my_nonlocal_free_list;
763 my_nonlocal_free_list = &t;
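/* Editor's note: the free_task_hint template argument lets call sites compile
   away the branches they can rule out statically. Hypothetical call sites,
   using the hint names from the free_task_hint enum:

       s->free_task<small_local_task>(t);    // known small and allocated by s itself
       s->free_task<small_task>(t);          // small, but possibly another scheduler's
       s->free_task<no_cache_small_task>(t); // small, but must bypass the non-local cache
       s->free_task<no_hint>(t);             // nothing is known statically
*/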
#if __TBB_TASK_PRIORITY
inline intptr_t generic_scheduler::effective_reference_priority () const {
    // A worker on the outermost dispatch level whose arena is fully staffed
    // uses the arena's top priority as the reference; otherwise the scheduler's
    // default reference is used (the market's priority for workers, the arena's
    // for masters).
    return !worker_outermost_level() ||
           my_arena->my_num_workers_allotted < my_arena->num_workers_active()
           ? *my_ref_top_priority : my_arena->my_top_priority;
}
inline void generic_scheduler::offload_task ( task& t, intptr_t /*priority*/ ) {
    __TBB_ASSERT( !is_proxy(t), "The proxy task cannot be offloaded" );
    __TBB_ASSERT( my_offloaded_task_list_tail_link && !*my_offloaded_task_list_tail_link, NULL );
    /* ... */
    t.prefix().next_offloaded = my_offloaded_tasks;
    my_offloaded_tasks = &t;
}
#endif /* __TBB_TASK_PRIORITY */
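/* Editor's note: offloaded tasks form an intrusive singly linked list threaded
   through task_prefix::next_offloaded; my_offloaded_tasks is its head and
   my_offloaded_task_list_tail_link points at the link that terminates it. A
   hypothetical traversal, e.g. when priorities change and the list is reloaded:

       for (task* t = my_offloaded_tasks; t; t = t->prefix().next_offloaded) {
           // re-spawn or re-offload t depending on its priority
       }
*/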
#if __TBB_PREVIEW_RESUMABLE_TASKS
inline void generic_scheduler::set_post_resume_action(post_resume_action pra, void* arg) {
    __TBB_ASSERT(my_post_resume_action == PRA_NONE, "Post resume action has already been set.");
    __TBB_ASSERT(!my_post_resume_arg, NULL);
    my_post_resume_action = pra;
    my_post_resume_arg = arg;
}

inline bool generic_scheduler::prepare_resume(generic_scheduler& target) {
    if ( /* this scheduler waits on the outermost level of its original thread */ ) {
        // Ask the target to raise the recall flag, so that this thread can later
        // be brought back to the stack it owns.
        target.set_post_resume_action(PRA_NOTIFY, my_current_is_recalled);
        return true;
    }
    if ( /* this is a coroutine that has completed its dispatch loop */ ) {
        // Ask the target to clean this coroutine up, and resume the target once
        // the dispatch loop exits.
        target.set_post_resume_action(PRA_CLEANUP, this);
        my_target_on_exit = &target;
        return false;
    }
    // Suspended in the middle of a wait: abandon this scheduler. It is resumed
    // when the wait task's reference count drops to one.
    my_wait_task->prefix().abandoned_scheduler = this;
    target.set_post_resume_action(PRA_ABANDON, my_wait_task);
    return true;
}
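/* Editor's note: a typical switch then looks as follows (this is essentially
   what resume_original_scheduler() below does):

       generic_scheduler& target = pick_target_sketch();  // hypothetical chooser
       if (s.prepare_resume(target))
           s.resume(target);   // switch now; target runs its post resume action
       // otherwise: let the current coroutine finish its dispatch loop;
       // it resumes the target on exit (see my_target_on_exit).
*/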
inline bool generic_scheduler::resume_original_scheduler() {
    generic_scheduler& target = *my_arena_slot->my_scheduler;
    if (!prepare_resume(target)) {
        // The original scheduler cannot be resumed from the current stack:
        // the coroutine must finish first and resumes the target on exit.
        return false;
    }
    resume(target);
    return true;
}
inline void generic_scheduler::resume(generic_scheduler& target) {
    // Do not create non-trivial objects on the stack of this function: they
    // might never be destroyed if the coroutine is not resumed again.
    __TBB_ASSERT(target.my_post_resume_action != PRA_NONE,
        "The post resume action is not set. Has prepare_resume been called?");
    __TBB_ASSERT(target.my_post_resume_arg, NULL);
    __TBB_ASSERT(&target != this, NULL);

    // Transfer the current arena binding to the target scheduler.
    target.my_arena_index = my_arena_index;
    target.my_arena_slot = my_arena_slot;
#if __TBB_SCHEDULER_OBSERVER
    target.my_last_global_observer = my_last_global_observer;
#endif
#if __TBB_ARENA_OBSERVER
    target.my_last_local_observer = my_last_local_observer;
#endif
    target.attach_mailbox(affinity_id(target.my_arena_index + 1));

#if __TBB_TASK_PRIORITY
    if (my_offloaded_tasks)
        my_arena->orphan_offloaded_tasks(*this);
#endif /* __TBB_TASK_PRIORITY */

    governor::assume_scheduler(&target);
    my_co_context.resume(target.my_co_context);
    __TBB_ASSERT(governor::is_set(this), NULL);

    do_post_resume_action();
}
inline void generic_scheduler::do_post_resume_action() {
    __TBB_ASSERT(my_post_resume_action != PRA_NONE, "The post resume action is not set.");
    __TBB_ASSERT(my_post_resume_arg, NULL);

    switch (my_post_resume_action) {
    case PRA_ABANDON:
    {
        task_prefix& wait_task_prefix = static_cast<task*>(my_post_resume_arg)->prefix();
        reference_count old_ref_count = __TBB_FetchAndAddW(&wait_task_prefix.ref_count, internal::abandon_flag);
        __TBB_ASSERT(old_ref_count > 0, NULL);
        if (old_ref_count == 1) {
            // The wait has already been completed; the abandoned scheduler
            // must be resumed from here.
            tbb::task::resume(wait_task_prefix.abandoned_scheduler);
        }
        break;
    }
    case PRA_CALLBACK:
    {
        callback_t callback = *static_cast<callback_t*>(my_post_resume_arg);
        callback();
        break;
    }
    case PRA_CLEANUP:
    {
        generic_scheduler* to_cleanup = static_cast<generic_scheduler*>(my_post_resume_arg);
        __TBB_ASSERT(!to_cleanup->my_properties.genuine, NULL);
        /* ... detach the coroutine from the arena slot ... */
        // Cache the coroutine for possible reuse.
        to_cleanup->my_arena->my_co_cache.push(to_cleanup);
        break;
    }
    case PRA_NOTIFY:
    {
        tbb::atomic<bool>& scheduler_recall_flag = *static_cast<tbb::atomic<bool>*>(my_post_resume_arg);
        scheduler_recall_flag = true;
        // Do not access the recall flag after the store: the recalled thread
        // may already have destroyed it.
        break;
    }
    default:
        __TBB_ASSERT(false, NULL);
    }

    my_post_resume_action = PRA_NONE;
    my_post_resume_arg = NULL;
}
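/* Editor's note: the PRA_ABANDON case relies on abandon_flag being a single
   high bit far above any realistic reference count, so one fetch-and-add both
   publishes "an abandoned scheduler is attached to this wait" and reads the
   old count. A simplified model of the two sides (the bit position is
   illustrative, not the actual definition):

       const std::int64_t abandon_flag_sketch = std::int64_t(1) << 62;

       // suspending side (above):
       std::int64_t old = ref_count.fetch_add(abandon_flag_sketch);
       if (old == 1) resume_abandoned();   // the wait had already completed

       // completing side (elsewhere): on releasing the last real reference,
       // seeing the flag set means it must resume the abandoned scheduler
       // instead of waking a waiter.
*/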
//! The functor that raises a scheduler's recall flag at a suspend point.
struct recall_functor {
    tbb::atomic<bool>* scheduler_recall_flag;

    recall_functor(tbb::atomic<bool>* recall_flag_) :
        scheduler_recall_flag(recall_flag_) {}

    void operator()(task::suspend_point /*tag*/) {
        *scheduler_recall_flag = true;
    }
};
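/* Editor's note: recall_functor backs recall_function(): suspending with it
   parks the current coroutine and raises the recall flag that the scheduler
   owning the original thread stack waits on (my_current_is_recalled). A
   hypothetical use, with the suspend primitive left abstract:

       tbb::atomic<bool>* flag = s.my_current_is_recalled;
       suspend_sketch(recall_functor(flag));  // hypothetical suspend primitive;
                                              // the functor runs once the switch
                                              // away from this stack is complete
*/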
938 generic_scheduler&
s = *
static_cast<generic_scheduler*
>(arg);
943 s.do_post_resume_action();
946 __TBB_ASSERT(
s.my_innermost_running_task ==
s.my_dummy_task, NULL);
948 s.local_wait_for_all(*
s.my_dummy_task, NULL);
951 s.resume(*
s.my_target_on_exit);
#if __TBB_TASK_GROUP_CONTEXT
//! Helper class for tracking floating point context and task group context switches.
/** Assuming the presence of an ITT collector, in addition to keeping track of
    the floating point context, this class emits ITT events that mark the begin
    and end of task group context execution. **/
template <bool report_tasks>
class context_guard_helper {
    const task_group_context *curr_ctx;
#if __TBB_FP_CONTEXT
    cpu_ctl_env guard_cpu_ctl_env;
    cpu_ctl_env curr_cpu_ctl_env;
#endif
public:
    context_guard_helper() : curr_ctx( NULL ) {
#if __TBB_FP_CONTEXT
        guard_cpu_ctl_env.get_env();
        curr_cpu_ctl_env = guard_cpu_ctl_env;
#endif
    }
    ~context_guard_helper() {
#if __TBB_FP_CONTEXT
        if ( curr_cpu_ctl_env != guard_cpu_ctl_env )
            guard_cpu_ctl_env.set_env();
#endif
        if ( report_tasks && curr_ctx )
            ITT_TASK_END;
    }
    // Called from the dispatch loop on the hot path; keep it lightweight.
    void set_ctx( const task_group_context *ctx ) {
        generic_scheduler::assert_context_valid( ctx );
#if __TBB_FP_CONTEXT
        const cpu_ctl_env &ctl = *punned_cast<cpu_ctl_env*>( &ctx->my_cpu_ctl_env );
        // Compare the FPU settings directly, because the context object may be
        // reused between parallel algorithms.
        if ( ctl != curr_cpu_ctl_env ) {
            curr_cpu_ctl_env = ctl;
            curr_cpu_ctl_env.set_env();
        }
#endif
        if ( report_tasks && ctx != curr_ctx ) {
            // If a task group context was active, report the end of its
            // execution frame before reporting the beginning of the new one.
            // The context address is used to group the tasks belonging to it.
            if ( curr_ctx )
                ITT_TASK_END;
            ITT_TASK_BEGIN( ctx, ctx->my_name, NULL );
            curr_ctx = ctx;
        }
    }
    void restore_default() {
#if __TBB_FP_CONTEXT
        if ( curr_cpu_ctl_env != guard_cpu_ctl_env ) {
            guard_cpu_ctl_env.set_env();
            curr_cpu_ctl_env = guard_cpu_ctl_env;
        }
#endif
    }
};
#endif /* __TBB_TASK_GROUP_CONTEXT */

/* ... remainder of the header elided ... */
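/* Editor's note: the dispatch loop is the intended client of this guard: it
   switches the FPU control environment (and, when report_tasks is true, the
   ITT task frame) only when consecutive tasks belong to different contexts.
   A sketch of a custom_scheduler-style loop (simplified):

       context_guard_helper<false> guard;           // no ITT task reporting
       while (task* t = next_task_sketch()) {       // hypothetical task source
           guard.set_ctx(t->prefix().context);      // re-set FPU env only if changed
           t->execute();
       }
       guard.restore_default();                     // restore the guarded FPU state
*/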