// generic_scheduler::attach_arena (fragment): bind the scheduler's priority
// references to the arena it is entering.
#if __TBB_TASK_PRIORITY
    my_ref_top_priority = &a->my_top_priority;
    my_ref_reload_epoch = &a->my_reload_epoch;
    my_local_reload_epoch = *my_ref_reload_epoch;
#endif /* __TBB_TASK_PRIORITY */
// A slot is claimed with a single compare-and-swap on its scheduler pointer.
static bool occupy_slot( generic_scheduler*& slot, generic_scheduler& s ) {
    return !slot && as_atomic( slot ).compare_and_swap( &s, NULL ) == NULL;
}
size_t arena::occupy_free_slot_in_range( generic_scheduler& s, size_t lower, size_t upper ) {
    if ( lower >= upper ) return out_of_arena;
    // Start from the slot this scheduler occupied last time, or from a random slot in the range.
    size_t index = s.my_arena_index;
    if ( index < lower || index >= upper )
        index = s.my_random.get() % (upper - lower) + lower;
    // Circular scan of [lower, upper) starting at index.
    for ( size_t i = index; i < upper; ++i )
        if ( occupy_slot(my_slots[i].my_scheduler, s) ) return i;
    for ( size_t i = lower; i < index; ++i )
        if ( occupy_slot(my_slots[i].my_scheduler, s) ) return i;
    return out_of_arena;
}
template <bool as_worker>
size_t arena::occupy_free_slot( generic_scheduler& s ) {
    // Masters first try one of the reserved slots; workers skip straight to the shared range.
    size_t index = as_worker ? out_of_arena : occupy_free_slot_in_range( s, 0, my_num_reserved_slots );
    if ( index == out_of_arena ) {
        // Then all threads compete for the non-reserved slots.
        index = occupy_free_slot_in_range( s, my_num_reserved_slots, my_num_slots );
        if ( index == out_of_arena )
            return out_of_arena;
    }
    atomic_update( my_limit, (unsigned)(index + 1), std::less<unsigned>() );
    return index;
}
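A self-contained sketch of the same claiming protocol, using std::atomic in place of TBB's as_atomic wrapper; the names Worker, Slot, claim_slot and find_slot are illustrative, not TBB identifiers:

    #include <atomic>
    #include <cstddef>
    #include <vector>

    struct Worker {};                       // stand-in for generic_scheduler

    struct Slot { std::atomic<Worker*> owner{nullptr}; };

    // A slot is claimed only if the CAS installs this worker into an empty slot.
    bool claim_slot(Slot& slot, Worker& w) {
        Worker* expected = nullptr;
        return slot.owner.load(std::memory_order_relaxed) == nullptr &&
               slot.owner.compare_exchange_strong(expected, &w);
    }

    // Circular scan of [lower, upper) starting at a caller-chosen index,
    // mirroring arena::occupy_free_slot_in_range.
    std::size_t find_slot(std::vector<Slot>& slots, Worker& w,
                          std::size_t lower, std::size_t upper, std::size_t start) {
        const std::size_t out_of_range = ~std::size_t(0);
        if (lower >= upper) return out_of_range;
        if (start < lower || start >= upper) start = lower;
        for (std::size_t i = start; i < upper; ++i)
            if (claim_slot(slots[i], w)) return i;
        for (std::size_t i = lower; i < start; ++i)
            if (claim_slot(slots[i], w)) return i;
        return out_of_range;
    }

The two scan loops give every thread a different starting point, which keeps contention on the compare-and-swap low when many threads join the arena at once.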
void arena::process( generic_scheduler& s ) {
    // ...
    size_t index = occupy_free_slot</*as_worker*/true>( s );
    if ( index == out_of_arena )
        goto quit;
    __TBB_ASSERT( index >= my_num_reserved_slots, "Workers cannot occupy reserved slots" );
    s.attach_arena( this, index, /*is_master*/false );

#if !__TBB_FP_CONTEXT
    my_cpu_ctl_env.set_env();
#endif
#if __TBB_ARENA_OBSERVER
    __TBB_ASSERT( !s.my_last_local_observer, "There cannot be notified local observers when entering arena" );
    my_observers.notify_entry_observers( s.my_last_local_observer, /*worker=*/true );
#endif
    // Dispatch loop: drain the arena until a recall is requested.
    for ( ;; ) {
        // ...
        __TBB_ASSERT( s.is_quiescent_local_task_pool_reset(),
                      "Worker cannot leave arena while its task pool is not reset" );
        if ( is_recall_requested() )
            break;
        // ...
    }
#if __TBB_ARENA_OBSERVER
    my_observers.notify_exit_observers( s.my_last_local_observer, /*worker=*/true );
    s.my_last_local_observer = NULL;
#endif
#if __TBB_TASK_PRIORITY
    if ( s.my_offloaded_tasks )
        orphan_offloaded_tasks( s );
#endif
#if __TBB_STATISTICS
    ++s.my_counters.arena_roundtrips;
    *my_slots[index].my_counters += s.my_counters;
    s.my_counters.reset();
#endif
    // ...
quit:
    on_thread_leaving<ref_worker>();
}
arena::arena ( market& m, unsigned num_slots, unsigned num_reserved_slots ) {
    __TBB_ASSERT( !my_guard, "improperly allocated arena?" );
#if __TBB_TASK_PRIORITY
    __TBB_ASSERT( !my_reload_epoch && !my_orphaned_tasks && !my_skipped_fifo_priority, "New arena object is not zeroed" );
#endif
    my_market = &m;
    my_num_slots = num_arena_slots(num_slots);
    my_num_reserved_slots = num_reserved_slots;
    my_max_num_workers = num_slots - num_reserved_slots;
    my_references = ref_external; // accounts for the master
#if __TBB_TASK_PRIORITY
    my_bottom_priority = my_top_priority = normalized_normal_priority;
#endif
#if __TBB_ARENA_OBSERVER
    my_observers.my_arena = this;
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
    my_co_cache.init(4 * num_slots);
#endif
    __TBB_ASSERT ( my_max_num_workers <= my_num_slots, NULL );
    // Initialize the arena slots and the mailboxes that precede them.
    for( unsigned i = 0; i < my_num_slots; ++i ) {
        __TBB_ASSERT( !my_slots[i].my_scheduler && !my_slots[i].task_pool, NULL );
#if __TBB_PREVIEW_RESUMABLE_TASKS
        __TBB_ASSERT( !my_slots[i].my_scheduler_is_recalled, NULL );
#endif
        ITT_SYNC_CREATE(my_slots + i, SyncType_Scheduler, SyncObj_WorkerTaskPool);
        mailbox(i+1).construct();
        my_slots[i].hint_for_pop = i;
#if __TBB_PREVIEW_CRITICAL_TASKS
        my_slots[i].hint_for_critical = i;
#endif
#if __TBB_STATISTICS
        my_slots[i].my_counters = new ( NFS_Allocate(1, sizeof(statistics_counters), NULL) ) statistics_counters;
#endif
    }
    my_task_stream.initialize(my_num_slots);
    ITT_SYNC_CREATE(&my_task_stream, SyncType_Scheduler, SyncObj_TaskStream);
#if __TBB_PREVIEW_CRITICAL_TASKS
    my_critical_task_stream.initialize(my_num_slots);
    ITT_SYNC_CREATE(&my_critical_task_stream, SyncType_Scheduler, SyncObj_CriticalTaskStream);
#endif
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    my_local_concurrency_mode = false;
    my_global_concurrency_mode = false;
#endif
#if !__TBB_FP_CONTEXT
    my_cpu_ctl_env.get_env();
#endif
}
arena& arena::allocate_arena( market& m, unsigned num_slots, unsigned num_reserved_slots ) {
    size_t n = allocation_size(num_arena_slots(num_slots));
    unsigned char* storage = (unsigned char*)NFS_Allocate( 1, n, NULL );
    // Zero all slots to indicate that they are empty.
    memset( storage, 0, n );
    // The mailboxes are laid out in front of the arena object itself.
    return *new( storage + num_arena_slots(num_slots) * sizeof(mail_outbox) ) arena(m, num_slots, num_reserved_slots);
}
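The same allocation trick in stand-alone form: one zeroed, cache-aligned block holds the per-slot mailboxes followed by the object itself, which is constructed with placement new just past them. Mailbox, Hub, allocate_hub and free_hub are illustrative names; TBB uses NFS_Allocate/NFS_Free and its own types.

    #include <cstdlib>
    #include <cstring>
    #include <new>

    struct Mailbox { char pad[64]; };   // one cache line per mailbox in the real code
    struct Hub     { unsigned slots; explicit Hub(unsigned n) : slots(n) {} };

    // Allocate one zeroed block that holds `n` mailboxes followed by the Hub object,
    // then construct the Hub with placement new just past the mailboxes.
    Hub& allocate_hub(unsigned n) {
        std::size_t bytes = n * sizeof(Mailbox) + sizeof(Hub);
        void* storage = std::aligned_alloc(64, (bytes + 63) / 64 * 64);
        std::memset(storage, 0, bytes);
        return *new (static_cast<char*>(storage) + n * sizeof(Mailbox)) Hub(n);
    }

    void free_hub(Hub& h) {
        unsigned n = h.slots;
        h.~Hub();
        // The block actually starts n mailboxes before the Hub object.
        std::free(reinterpret_cast<char*>(&h) - n * sizeof(Mailbox));
    }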
void arena::free_arena () {
    __TBB_ASSERT( !my_references, "There are threads in the dying arena" );
    __TBB_ASSERT( !my_num_workers_requested && !my_num_workers_allotted, "Dying arena requests workers" );
    __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, "Inconsistent state of a dying arena" );
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    // ...
#endif
#if !__TBB_STATISTICS_EARLY_DUMP
    // ...
#endif
    intptr_t drained = 0;
    for ( unsigned i = 0; i < my_num_slots; ++i ) {
        __TBB_ASSERT( !my_slots[i].my_scheduler, "arena slot is not empty" );
        my_slots[i].free_task_pool();
#if __TBB_STATISTICS
        NFS_Free( my_slots[i].my_counters );
#endif
        drained += mailbox(i+1).drain();
    }
    __TBB_ASSERT( my_task_stream.drain()==0, "Not all enqueued tasks were executed");
#if __TBB_PREVIEW_RESUMABLE_TASKS
    my_co_cache.cleanup();
#endif
#if __TBB_PREVIEW_CRITICAL_TASKS
    __TBB_ASSERT( my_critical_task_stream.drain()==0, "Not all critical tasks were executed");
#endif
#if __TBB_COUNT_TASK_NODES
    my_market->update_task_node_count( -drained );
#endif
#if __TBB_TASK_GROUP_CONTEXT
    __TBB_ASSERT( my_default_ctx, "Master thread never entered the arena?" );
    my_default_ctx->~task_group_context();
    NFS_Free( my_default_ctx );
#endif
#if __TBB_ARENA_OBSERVER
    if ( !my_observers.empty() )
        my_observers.clear();
#endif
    void* storage = &mailbox(my_num_slots);
    __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, NULL );
    this->~arena();
#if TBB_USE_ASSERT > 1
    memset( storage, 0, allocation_size(my_num_slots) );
#endif
    NFS_Free( storage );
}
#if __TBB_STATISTICS
void arena::dump_arena_statistics () {
    statistics_counters total;
    for( unsigned i = 0; i < my_num_slots; ++i ) {
#if __TBB_STATISTICS_EARLY_DUMP
        generic_scheduler* s = my_slots[i].my_scheduler;
        if ( s )
            *my_slots[i].my_counters += s->my_counters;
#endif
        if ( i != 0 ) {
            total += *my_slots[i].my_counters;
            dump_statistics( *my_slots[i].my_counters, i );
        }
    }
    dump_statistics( *my_slots[0].my_counters, 0 );
#if __TBB_STATISTICS_STDOUT
#if !__TBB_STATISTICS_TOTALS_ONLY
    printf( "----------------------------------------------\n" );
#endif
    dump_statistics( total, workers_counters_total );
    total += *my_slots[0].my_counters;
    dump_statistics( total, arena_counters_total );
#if !__TBB_STATISTICS_TOTALS_ONLY
    printf( "==============================================\n" );
#endif
#endif /* __TBB_STATISTICS_STDOUT */
}
#endif /* __TBB_STATISTICS */
#if __TBB_TASK_PRIORITY
// Decides whether a scheduler may still contribute tasks at the current priority level.
inline bool arena::may_have_tasks ( generic_scheduler* s, bool& tasks_present, bool& dequeuing_possible ) {
    if ( !s || s->my_arena != this )
        return false;
    dequeuing_possible |= s->worker_outermost_level();
    if ( s->my_pool_reshuffling_pending ) {
        // The primary task pool is nonempty and may contain tasks at the current
        // priority level; its owner is still reshuffling them.
        tasks_present = true;
        return true;
    }
    if ( s->my_offloaded_tasks ) {
        tasks_present = true;
        if ( s->my_local_reload_epoch < *s->my_ref_reload_epoch ) {
            // The scheduler's offload area may contain tasks at the current priority level.
            return true;
        }
    }
    return false;
}
void arena::orphan_offloaded_tasks ( generic_scheduler& s ) {
    __TBB_ASSERT( s.my_offloaded_tasks, NULL );
    ++my_abandonment_epoch;
    __TBB_ASSERT( s.my_offloaded_task_list_tail_link && !*s.my_offloaded_task_list_tail_link, NULL );
    task* orphans;
    do {
        orphans = const_cast<task*>(my_orphaned_tasks);
        // Splice the scheduler's offloaded list in front of the arena's orphan list.
        *s.my_offloaded_task_list_tail_link = orphans;
    } while ( as_atomic(my_orphaned_tasks).compare_and_swap(s.my_offloaded_tasks, orphans) != orphans );
    s.my_offloaded_tasks = NULL;
#if TBB_USE_ASSERT
    s.my_offloaded_task_list_tail_link = NULL;
#endif
}
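The do/while above is the standard lock-free "splice a whole list onto a shared head" loop. A minimal stand-alone version with std::atomic (Node, orphan_head and push_list are illustrative names):

    #include <atomic>

    struct Node { Node* next = nullptr; };

    std::atomic<Node*> orphan_head{nullptr};

    // Atomically prepend the list [first .. *tail_link] to orphan_head.
    // tail_link points to the next-pointer of the last node of the donated list.
    void push_list(Node* first, Node** tail_link) {
        Node* old_head = orphan_head.load(std::memory_order_relaxed);
        do {
            *tail_link = old_head;   // hook the current head after our tail
        } while (!orphan_head.compare_exchange_weak(old_head, first,
                                                    std::memory_order_release,
                                                    std::memory_order_relaxed));
    }

On CAS failure compare_exchange_weak reloads old_head, so the tail link is rewritten on every retry, exactly as the TBB loop rewrites *my_offloaded_task_list_tail_link.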
bool arena::has_enqueued_tasks() {
    // Look for enqueued tasks at all priority levels.
    for ( int p = 0; p < num_priority_levels; ++p )
        if ( !my_task_stream.empty(p) ) return true;
    return false;
}

void arena::restore_priority_if_need() {
    if ( has_enqueued_tasks() ) {
        advertise_new_work<work_enqueued>();
#if __TBB_TASK_PRIORITY
        // Re-advertise priority levels that still hold enqueued tasks.
        for ( int p = 0; p < num_priority_levels; ++p )
            if ( !my_task_stream.empty(p) && (p < my_bottom_priority || p > my_top_priority) )
                my_market->update_arena_priority( *this, p );
#endif
    }
}
bool arena::is_out_of_work() {
    switch( my_pool_state ) {
    // ...
    case SNAPSHOT_FULL: {
        // Use a unique "busy" marker that cannot be mistaken for a legal snapshot value.
        pool_state_t busy = pool_state_t(&busy);
        // Only one thread at a time is allowed to take the expensive snapshot.
        if( my_pool_state.compare_and_swap( busy, SNAPSHOT_FULL ) == SNAPSHOT_FULL ) {
#if __TBB_TASK_PRIORITY
            intptr_t top_priority = my_top_priority;
            uintptr_t reload_epoch = my_reload_epoch;
#endif
            size_t n = my_limit;
            size_t k;
            for( k = 0; k < n; ++k ) {
                // ... (break out early if some slot's task pool is non-empty)
                if( my_pool_state != busy )
                    return false; // the snapshot was invalidated concurrently
            }
            bool work_absent = k == n;
#if __TBB_PREVIEW_CRITICAL_TASKS
            bool no_critical_tasks = my_critical_task_stream.empty(0);
            work_absent &= no_critical_tasks;
#endif
#if __TBB_TASK_PRIORITY
            bool tasks_present = !work_absent || my_orphaned_tasks;
            bool dequeuing_possible = false;
            if( work_absent ) {
                uintptr_t abandonment_epoch = my_abandonment_epoch;
                // Inspect the master's slot under the context-propagation mutex, then the workers'.
                the_context_state_propagation_mutex.lock();
                work_absent = !may_have_tasks( my_slots[0].my_scheduler, tasks_present, dequeuing_possible );
                the_context_state_propagation_mutex.unlock();
                for( k = 1; work_absent && k < n; ++k ) {
                    if( my_pool_state != busy )
                        return false;
                    work_absent = !may_have_tasks( my_slots[k].my_scheduler, tasks_present, dequeuing_possible );
                }
                work_absent = work_absent
                              && abandonment_epoch == my_abandonment_epoch;
            }
#endif /* __TBB_TASK_PRIORITY */
            // Test and test-and-set.
            if( my_pool_state == busy ) {
#if __TBB_TASK_PRIORITY
                bool no_fifo_tasks = my_task_stream.empty(top_priority);
                work_absent = work_absent && (!dequeuing_possible || no_fifo_tasks)
                              && top_priority == my_top_priority && reload_epoch == my_reload_epoch;
#else
                bool no_fifo_tasks = my_task_stream.empty(0);
                work_absent = work_absent && no_fifo_tasks;
#endif
                if( work_absent ) {
#if __TBB_TASK_PRIORITY
                    if ( top_priority > my_bottom_priority ) {
                        if ( my_market->lower_arena_priority(*this, top_priority - 1, reload_epoch)
                             && !my_task_stream.empty(top_priority) )
                        {
                            atomic_update( my_skipped_fifo_priority, top_priority, std::less<intptr_t>() );
                        }
                    }
                    else if ( !tasks_present && !my_orphaned_tasks && no_fifo_tasks ) {
#endif /* __TBB_TASK_PRIORITY */
                        // The arena is really empty: release the workers.
                        int current_demand = (int)my_max_num_workers;
                        if( my_pool_state.compare_and_swap( SNAPSHOT_EMPTY, busy ) == busy ) {
                            my_market->adjust_demand( *this, -current_demand );
                            restore_priority_if_need();
                            return true;
                        }
                        return false;
#if __TBB_TASK_PRIORITY
                    }
#endif
                }
            }
            // The snapshot could not be committed; undo the busy marker.
            my_pool_state.compare_and_swap( SNAPSHOT_FULL, busy );
        }
        return false;
    }
    default:
        return false;
    }
}
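The pool-state logic above is a small state machine (SNAPSHOT_FULL -> busy -> SNAPSHOT_EMPTY, or back to SNAPSHOT_FULL if the snapshot is invalidated). A compact stand-alone model of the protocol, with illustrative names (PoolState, try_take_snapshot) and std::atomic instead of tbb::atomic:

    #include <atomic>

    enum class PoolState { Empty, Full, Busy };

    std::atomic<PoolState> pool_state{PoolState::Full};

    // Returns true if this thread proved the pool empty and committed the Empty state.
    // `scan_for_work` stands in for the slot/stream inspection done by is_out_of_work().
    template <typename ScanFn>
    bool try_take_snapshot(ScanFn scan_for_work) {
        PoolState expected = PoolState::Full;
        // Only one thread at a time may take the snapshot.
        if (!pool_state.compare_exchange_strong(expected, PoolState::Busy))
            return false;

        bool work_absent = !scan_for_work();

        if (work_absent) {
            expected = PoolState::Busy;
            // Commit the empty snapshot unless new work arrived and reset the state.
            if (pool_state.compare_exchange_strong(expected, PoolState::Empty))
                return true;            // workers can now be released
        } else {
            expected = PoolState::Busy;
            pool_state.compare_exchange_strong(expected, PoolState::Full);
        }
        return false;
    }

If new work is advertised while the snapshot is in progress, the publisher resets the state and the final compare-exchange fails, so stale snapshots are never committed.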
#if __TBB_COUNT_TASK_NODES
intptr_t arena::workers_task_node_count() {
    intptr_t result = 0;
    for( unsigned i = 1; i < my_num_slots; ++i )
        if( generic_scheduler* s = my_slots[i].my_scheduler )
            result += s->my_task_node_count;
    return result;
}
#endif /* __TBB_COUNT_TASK_NODES */
void arena::enqueue_task( task& t, intptr_t prio, FastRandom &random ) {
#if __TBB_RECYCLE_TO_ENQUEUE
    // ...
#endif
#if TBB_USE_ASSERT
    // ... (check the parent's reference count)
    __TBB_ASSERT( ref_count!=0, "attempt to enqueue task whose parent has a ref_count==0 (forgot to set_ref_count?)" );
    __TBB_ASSERT( ref_count>0, "attempt to enqueue task whose parent has a ref_count<0" );
#endif
#if __TBB_PREVIEW_CRITICAL_TASKS
    // Critical tasks take the dedicated critical stream and are advertised as spawned work.
#if __TBB_TASK_ISOLATION
    // ...
#endif
    advertise_new_work<work_spawned>();
    // ...
#endif
#if __TBB_TASK_PRIORITY
    intptr_t p = prio ? normalize_priority(priority_t(prio)) : normalized_normal_priority;
    assert_priority_valid(p);
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
    // ...
#endif
    my_task_stream.push( &t, p, random );
    if ( p != my_top_priority )
        my_market->update_arena_priority( *this, p );
#else /* !__TBB_TASK_PRIORITY */
    __TBB_ASSERT_EX(prio == 0, "the library is not configured to respect the task priority");
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
    // ...
#endif
    my_task_stream.push( &t, 0, random );
#endif /* !__TBB_TASK_PRIORITY */
    advertise_new_work<work_enqueued>();
#if __TBB_TASK_PRIORITY
    if ( p != my_top_priority )
        my_market->update_arena_priority( *this, p );
#endif
}
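For orientation, the code above is reached from the public enqueue entry points. A minimal usage sketch, assuming a classic TBB 2019/2020 build (the priority overloads exist only when task priority support is compiled in); the snippet does not wait for the enqueued work to finish:

    #include <tbb/task_arena.h>
    #include <tbb/task.h>
    #include <cstdio>

    struct HelloTask : tbb::task {
        tbb::task* execute() override {
            std::printf("enqueued task ran\n");
            return nullptr;
        }
    };

    int main() {
        tbb::task_arena arena(2);          // arena with concurrency 2

        // Functor form: goes through task_arena::enqueue -> arena::enqueue_task.
        arena.enqueue([] { std::printf("lambda task ran\n"); });

        // Classic form: a root task handed to the starvation-resistant FIFO
        // of the calling thread's current arena.
        tbb::task& t = *new( tbb::task::allocate_root() ) HelloTask;
        tbb::task::enqueue(t);

        // Enqueued tasks are fire-and-forget; a real program would synchronize
        // before shutdown (e.g. via a task_group or a waiting root task).
        return 0;
    }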
generic_scheduler::nested_arena_context::nested_arena_context( generic_scheduler *s, arena *a, size_t slot_index, bool type, bool same )
    : my_scheduler(*s), my_orig_ctx(NULL), same_arena(same) {
    if ( same_arena ) {
        // Only the dispatch-level state needs to be mimicked.
        my_orig_state.my_properties = my_scheduler.my_properties;
        my_orig_state.my_innermost_running_task = my_scheduler.my_innermost_running_task;
        mimic_outermost_level(a, type);
    } else {
        // Save the whole scheduler state and re-bind the scheduler to the nested arena.
        my_orig_state = *s;
#if __TBB_PREVIEW_RESUMABLE_TASKS
        my_scheduler.my_properties.genuine = true;
        my_scheduler.my_current_is_recalled = NULL;
#endif
        mimic_outermost_level(a, type);
        s->nested_arena_entry(a, slot_index);
    }
}

generic_scheduler::nested_arena_context::~nested_arena_context() {
#if __TBB_TASK_GROUP_CONTEXT
    my_scheduler.my_dummy_task->prefix().context = my_orig_ctx; // restore the dummy task's context
#endif
    if ( same_arena ) {
        my_scheduler.my_properties = my_orig_state.my_properties;
        my_scheduler.my_innermost_running_task = my_orig_state.my_innermost_running_task;
    } else {
        my_scheduler.nested_arena_exit();
#if __TBB_TASK_PRIORITY
        my_scheduler.my_local_reload_epoch = *my_orig_state.my_ref_reload_epoch;
#endif
    }
}
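The same save-and-restore idea as a generic RAII guard; ThreadState and StateGuard are illustrative names, not TBB types:

    struct ThreadState {
        int  arena_index = -1;
        bool outermost   = false;
    };

    // Saves the current state on entry and restores it on scope exit, the way
    // nested_arena_context restores the scheduler after leaving a nested arena.
    class StateGuard {
        ThreadState& live;
        ThreadState  saved;
    public:
        StateGuard(ThreadState& s, int new_index)
            : live(s), saved(s) {
            live.arena_index = new_index;   // mimic "outermost level" in the new arena
            live.outermost   = true;
        }
        ~StateGuard() { live = saved; }     // restore the original bindings
    };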
void generic_scheduler::nested_arena_entry( arena* a, size_t slot_index ) {
    // ... (critical-task and task_group_context bookkeeping elided) ...
#if __TBB_TASK_PRIORITY
    if ( my_offloaded_tasks )
        my_arena->orphan_offloaded_tasks( *this );
    my_offloaded_tasks = NULL;
#endif
    // ...
#if __TBB_ARENA_OBSERVER
    my_last_local_observer = 0;
    my_arena->my_observers.notify_entry_observers( my_last_local_observer, /*worker=*/false );
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
    // ...
#endif
}

void generic_scheduler::nested_arena_exit() {
#if __TBB_ARENA_OBSERVER
    my_arena->my_observers.notify_exit_observers( my_last_local_observer, /*worker=*/false );
#endif
#if __TBB_TASK_PRIORITY
    if ( my_offloaded_tasks )
        my_arena->orphan_offloaded_tasks( *this );
#endif
    // ...
}
#if __TBB_PREVIEW_RESUMABLE_TASKS
class resume_task : public task {
    // ... (execute() hands the suspend point back to the scheduler)
    if (s->prepare_resume(my_target)) {
        s->resume(my_target);
    }
    // ...
    prefix().state = task::to_resume;
    // ...
};
#endif /* __TBB_PREVIEW_RESUMABLE_TASKS */
void internal_suspend( void* suspend_callback, void* user_callback ) {
    generic_scheduler& s = *governor::local_scheduler();
    bool is_recalled = *s.my_arena_slot->my_scheduler_is_recalled;
    // ... (pick the scheduler, `target`, that will run the post-resume callback)
    generic_scheduler::callback_t callback = {
        (generic_scheduler::suspend_callback_t)suspend_callback, user_callback, &s };
    target.set_post_resume_action( generic_scheduler::PRA_CALLBACK, &callback );
    // ...
}

void internal_resume( task::suspend_point tag ) {
    // ...
}

task::suspend_point internal_current_suspend_point() {
    // ...
}
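At the user level this machinery backs the resumable-tasks preview feature. A hedged usage sketch, assuming a build with the preview enabled (TBB_PREVIEW_RESUMABLE_TASKS=1); the exact API of this preview may differ between releases:

    #define TBB_PREVIEW_RESUMABLE_TASKS 1
    #include <tbb/task.h>
    #include <thread>

    // Suspend the current task, hand its suspend_point to another thread,
    // and let that thread resume it later (e.g. when an async operation completes).
    void wait_for_async_event() {
        tbb::task::suspend([](tbb::task::suspend_point tag) {
            // This callback runs after the calling task has been suspended.
            std::thread([tag] {
                // ... wait for the external event here ...
                tbb::task::resume(tag);   // re-schedule the suspended task
            }).detach();
        });
        // Execution continues here once some thread calls resume(tag).
    }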
namespace interface7 {

void task_arena_base::internal_initialize() {
    governor::one_time_init();
    if( my_max_concurrency < 1 )
#if __TBB_NUMA_SUPPORT
        my_max_concurrency = tbb::internal::numa_topology::default_concurrency(numa_id());
#else
        my_max_concurrency = (int)governor::default_num_threads();
#endif
    __TBB_ASSERT( my_master_slots <= (unsigned)my_max_concurrency,
                  "Number of slots reserved for master should not exceed arena concurrency");
    arena* new_arena = market::create_arena( my_max_concurrency, my_master_slots, 0 );
#if __TBB_TASK_GROUP_CONTEXT
    // ... (set up the default context; capture FP settings if requested)
    new_arena->my_default_ctx->capture_fp_settings();
#endif
    // ... (publish the arena; only one of the racing threads wins)
#if __TBB_TASK_GROUP_CONTEXT
    new_arena->my_default_ctx->my_version_and_traits |= my_version_and_traits & exact_exception_flag;
    as_atomic(my_context) = new_arena->my_default_ctx;
#endif
#if __TBB_NUMA_SUPPORT
    my_arena->my_numa_binding_observer = tbb::internal::construct_binding_observer(
        /* ... numa_id and concurrency ... */ );
#endif
}

void task_arena_base::internal_terminate() {
#if __TBB_NUMA_SUPPORT
    if( my_arena->my_numa_binding_observer != NULL ) {
        tbb::internal::destroy_binding_observer(my_arena->my_numa_binding_observer);
        my_arena->my_numa_binding_observer = NULL;
    }
#endif
    // ... (release the arena and market references)
}

void task_arena_base::internal_attach() {
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    my_context = my_arena->my_default_ctx;
    my_version_and_traits |= my_context->my_version_and_traits & exact_exception_flag;
#endif
}
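The public-facing side of this initialization is the tbb::task_arena class. A small usage sketch (classic TBB 2019/2020 API):

    #include <tbb/task_arena.h>
    #include <tbb/parallel_for.h>

    int main() {
        // An arena with concurrency 4, one slot reserved for the calling (master) thread.
        tbb::task_arena arena(4, 1);
        arena.initialize();                 // otherwise initialized lazily on first use

        arena.execute([] {
            // Work submitted here uses only this arena's slots and workers.
            tbb::parallel_for(0, 1000, [](int) { /* ... */ });
        });

        // Fire-and-forget submission into the arena's starvation-resistant FIFO.
        arena.enqueue([] { /* background work */ });
        return 0;
    }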
// task_arena_base::internal_enqueue (fragment): refuses work if the arena's default
// context has been cancelled.
#if __TBB_TASK_GROUP_CONTEXT
    // __TBB_ASSERT( <default context not cancelled>,
    //     "The task will not be executed because default task_group_context of task_arena is cancelled. Has previously enqueued task thrown an exception?");
#endif

// delegated_task runs a functor passed to task_arena::execute on behalf of the
// submitting master thread.
class delegated_task : public task {
    // ...
};

// outermost_context (fragment): temporarily runs the delegated task under the
// arena's default context.
#if __TBB_TASK_GROUP_CONTEXT
    orig_ctx = t->prefix().context;
    t->prefix().context = s.my_arena->my_default_ctx;
#endif
~outermost_context() {
#if __TBB_TASK_GROUP_CONTEXT
    t->prefix().context = orig_ctx;
#endif
}

// Completion path (fragment): detect a waiter that abandoned the wait and resume
// the scheduler it left behind.
#if __TBB_PREVIEW_RESUMABLE_TASKS
    reference_count old_ref_count = __TBB_FetchAndStoreW(&prefix.ref_count, 1);
    if (old_ref_count == internal::abandon_flag + 2) {
        tbb::task::resume(prefix.abandoned_scheduler);
    }
#endif

// Wait-predicate used with the arena's exit monitor (fragment):
    : my_delegate(d), my_monitor(s), my_root(t) {}
    bool operator()(uintptr_t ctx) const { return (void*)ctx == (void*)&my_delegate; }

#if __TBB_USE_OPTIONAL_RTTI
    // ... (internal::forward< graph_funct >(deleg_funct->my_func)), 0);
#endif
// task_arena_base::internal_execute (fragments): exception propagation honors the
// exact_exception trait of the arena's default context.
#if TBB_USE_EXCEPTIONS
    // ...
    TbbRethrowException(pe);
    // ...
    if (my_version_and_traits & exact_exception_flag)
        throw;
#endif

// Helper used by internal_wait():
class wait_task : public task {
    // ...
};

#if __TBB_TASK_ISOLATION
// isolate_within_arena: run the delegate under a unique isolation tag and restore
// the caller's tag afterwards via an RAII guard.
struct isolation_guard {
    isolation_tag& guarded;
    isolation_tag  previous_value;
    isolation_guard( isolation_tag& isolation ) : guarded( isolation ), previous_value( isolation ) {}
    ~isolation_guard() { guarded = previous_value; }
};

void __TBB_EXPORTED_FUNC isolate_within_arena( delegate_base& d, intptr_t isolation ) {
    generic_scheduler* s = governor::local_scheduler_weak();
    __TBB_ASSERT( s, "this_task_arena::isolate() needs an initialized scheduler" );
    isolation_tag& current_isolation = s->my_innermost_running_task->prefix().isolation;
    isolation_guard guard( current_isolation );
    // Use the delegate's address as the isolation tag when none is supplied.
    current_isolation = isolation ? isolation : reinterpret_cast<isolation_tag>(&d);
    d();
}
#endif /* __TBB_TASK_ISOLATION */
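The public entry point for this is tbb::this_task_arena::isolate. A short usage sketch:

    #include <tbb/parallel_for.h>
    #include <tbb/task_arena.h>   // this_task_arena::isolate

    void outer() {
        tbb::parallel_for(0, 100, [](int) {
            // Work spawned inside the lambda is isolated: while waiting for it,
            // this thread will not steal unrelated tasks from the outer loop.
            tbb::this_task_arena::isolate([] {
                tbb::parallel_for(0, 10, [](int) { /* nested work */ });
            });
        });
    }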
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
bool worker_outermost_level() const
True if the scheduler is on the outermost dispatch level in a worker thread.
void __TBB_EXPORTED_METHOD internal_attach()
unsigned my_num_slots
The number of slots in the arena.
arena(market &, unsigned max_num_workers, unsigned num_reserved_slots)
Constructor.
Smart holder for the empty task class with automatic destruction.
void __TBB_EXPORTED_METHOD internal_initialize()
T1 atomic_update(tbb::atomic< T1 > &dst, T2 newValue, Pred compare)
Atomically replaces value of dst with newValue if they satisfy condition of compare predicate...
unsigned my_num_reserved_slots
The number of reserved slots (can be occupied only by masters).
void __TBB_EXPORTED_METHOD internal_wait() const
void notify(const P &predicate)
Notify waiting threads of the event that satisfies the given predicate.
exception_container_type * my_exception
Pointer to the container storing exception being propagated across this task group.
void adjust_demand(arena &, int delta)
Request that arena's need in workers should be adjusted.
static bool occupy_slot(generic_scheduler *&slot, generic_scheduler &s)
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
void __TBB_EXPORTED_METHOD internal_execute(delegate_base &) const
nested_arena_context(generic_scheduler *s, arena *a, size_t slot_index, bool type, bool same)
tbb::atomic< uintptr_t > my_pool_state
Current task pool state and estimate of available tasks amount.
bool master_outermost_level() const
True if the scheduler is on the outermost dispatch level in a master thread.
bool is_quiescent_local_task_pool_reset() const
static generic_scheduler * local_scheduler_if_initialized()
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
T __TBB_load_relaxed(const volatile T &location)
void __TBB_store_with_release(volatile T &location, V value)
atomic< T > & as_atomic(T &t)
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
static generic_scheduler * local_scheduler_weak()
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
static unsigned num_arena_slots(unsigned num_slots)
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
void advertise_new_work()
If necessary, raise a flag that there is new job in arena.
Base class for types that should not be copied or assigned.
concurrent_monitor my_exit_monitors
Waiting object for master threads that cannot join the arena.
task is running, and will be destroyed after method execute() completes.
void __TBB_EXPORTED_METHOD internal_terminate()
void copy_fp_settings(const task_group_context &src)
Copies FPU control setting from another context.
#define __TBB_CONTEXT_ARG(arg1, context)
size_t occupy_free_slot_in_range(generic_scheduler &s, size_t lower, size_t upper)
Tries to occupy a slot in the specified range.
unsigned num_workers_active() const
The number of workers active in the arena.
bool is_worker() const
True if running on a worker thread, false otherwise.
void on_thread_leaving()
Notification that worker or master leaves its arena.
market * my_market
The market I am in.
state_type state() const
Current execution state.
static unsigned default_num_threads()
Exception container that preserves the exact copy of the original exception.
Class representing where mail is put.
bool commit_wait(thread_context &thr)
Commit wait if event count has not changed; otherwise, cancel wait.
void spin_wait_while_eq(const volatile T &location, U value)
Spin WHILE the value of the variable is equal to a given value.
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
void mimic_outermost_level(arena *a, bool type)
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
task is in ready pool, or is going to be put there, or was just taken off.
unsigned char state
A task::state_type, stored as a byte for compactness.
task * my_dummy_task
Fake root task created by slave threads.
void enqueue_task(task &, intptr_t, FastRandom &)
Enqueue a task into the starvation-resistance queue.
void make_critical(task &t)
isolation_tag isolation
The tag used for task isolation.
static const int priority_critical
bool outermost
Indicates that a scheduler is on outermost level.
size_t occupy_free_slot(generic_scheduler &s)
Tries to occupy a slot in the arena. On success, returns the slot index; if no slot is available...
void nested_arena_entry(arena *, size_t)
uintptr_t my_arenas_aba_epoch
ABA prevention marker to assign to newly created arenas.
T __TBB_load_with_acquire(const volatile T &location)
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
void set_is_idle(bool value)
Indicate whether thread that reads this mailbox is idle.
task_group_context * context()
This method is deprecated and will be removed in the future.
void restore_priority_if_need()
If enqueued tasks are found, restore the arena priority and task presence status.
binary_semaphore for concurrent monitor
Memory prefix to a task object.
intptr_t isolation_tag
A tag for task isolation.
Set if ref_count might be changed by another thread. Used for debugging.
void attach_mailbox(affinity_id id)
scheduler_state my_orig_state
void attach_arena(arena *, size_t index, bool is_master)
intptr_t reference_count
A reference count.
bool is_idle_state(bool value) const
Indicate whether thread that reads this mailbox is idle.
generic_scheduler & my_scheduler
bool release(bool is_public, bool blocking_terminate)
Decrements market's refcount and destroys it in the end.
scheduler_properties my_properties
void process(generic_scheduler &)
Registers the worker with the arena and enters TBB scheduler dispatch loop.
#define __TBB_ASSERT_EX(predicate, comment)
"Extended" version is useful to suppress warnings if a variable is only used with an assert ...
internal::arena * my_arena
NULL if not currently initialized.
static const unsigned ref_external
Reference increment values for externals and workers.
static const pool_state_t SNAPSHOT_EMPTY
No tasks to steal since last snapshot was taken.
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
int ref_count() const
The internal reference count.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
#define __TBB_ISOLATION_ARG(arg1, isolation)
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
bool type
Indicates that a scheduler acts as a master or a worker.
unsigned short affinity_id
An id as used for specifying affinity.
void prepare_wait(thread_context &thr, uintptr_t ctx=0)
Prepare wait by inserting 'thr' into the wait queue.
void create_coroutine(coroutine_type &c, size_t stack_size, void *arg)
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
static arena & allocate_arena(market &, unsigned num_slots, unsigned num_reserved_slots)
Allocate an instance of arena.
task_group_context * my_orig_ctx
virtual void local_wait_for_all(task &parent, task *child)=0
task & allocate_task(size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
Allocate task object, either from the heap or a free list.
void __TBB_EXPORTED_FUNC isolate_within_arena(delegate_base &d, intptr_t isolation=0)
void cancel_wait(thread_context &thr)
Cancel the wait. Removes the thread from the wait queue if not removed yet.
static generic_scheduler * create_worker(market &m, size_t index, bool genuine)
Initialize a scheduler for a worker thread.
task **__TBB_atomic task_pool
#define ITT_SYNC_CREATE(obj, type, name)
bool is_out_of_work()
Check if there is job anywhere in arena.
int my_max_concurrency
Concurrency level for deferred initialization.
static void one_time_init()
Used to form groups of tasks.
A fast random number generator.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Bit-field representing properties of a scheduler.
static const size_t out_of_arena
void __TBB_EXPORTED_METHOD register_pending_exception()
Records the pending exception, and cancels the task group.
#define ITT_NOTIFY(name, obj)
virtual task * receive_or_steal_task(__TBB_ISOLATION_ARG(__TBB_atomic reference_count &completion_ref_count, isolation_tag isolation))=0
Try getting a task from other threads (via mailbox, stealing, FIFO queue, orphans adoption)...
A functor that spawns a task.
market * my_market
The market that owns this arena.
void detach()
Detach inbox from its outbox.
static int __TBB_EXPORTED_FUNC internal_current_slot()
void free_arena()
Completes arena shutdown, destructs and deallocates it.
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
static arena * create_arena(int num_slots, int num_reserved_slots, size_t stack_size)
Creates an arena object.
size_t __TBB_EXPORTED_FUNC NFS_GetLineSize()
Cache/sector line size.
const isolation_tag no_isolation
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
void __TBB_EXPORTED_METHOD internal_enqueue(task &, intptr_t) const
bool outermost_level() const
True if the scheduler is on the outermost dispatch level.
Base class for user-defined tasks.
static const intptr_t num_priority_levels
Work stealing task scheduler.
#define GATHER_STATISTIC(x)
atomic< unsigned > my_references
Reference counter for the arena.
bool has_enqueued_tasks()
Check for the presence of enqueued tasks at all priority levels.
unsigned short get()
Get a random number.
static int __TBB_EXPORTED_FUNC internal_max_concurrency(const task_arena *)
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
task object is freshly allocated or recycled.
#define __TBB_CONTEXT_ARG1(context)
void notify_one()
Notify one thread about the event.
bool is_critical(task &t)