#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
    // ...
#endif

#include "../rml/include/rml_tbb.h"

#if __TBB_PREVIEW_RESUMABLE_TASKS
    // ...
#endif

class task_group_context;
class allocate_root_with_context_proxy;
#if __TBB_PREVIEW_RESUMABLE_TASKS
//! Circular cache of schedulers used by the resumable tasks support.
class arena_co_cache {
    //! Circular buffer of cached schedulers.
    generic_scheduler** my_co_scheduler_cache;
    //! Index of the next slot to fill.
    unsigned my_head;
    //! Maximal valid index of the buffer (capacity - 1).
    unsigned my_max_index;

    unsigned next_index() {
        return ( my_head == my_max_index ) ? 0 : my_head + 1;
    }
    unsigned prev_index() {
        return ( my_head == 0 ) ? my_max_index : my_head - 1;
    }
    bool internal_empty() {
        return my_co_scheduler_cache[prev_index()] == NULL;
    }
    void internal_scheduler_cleanup(generic_scheduler* to_cleanup) {
        to_cleanup->my_arena_slot = NULL;
        // ...
    }

public:
    void init(unsigned cache_capacity) {
        size_t alloc_size = cache_capacity * sizeof(generic_scheduler*);
        my_co_scheduler_cache = (generic_scheduler**)NFS_Allocate(1, alloc_size, NULL);
        memset( my_co_scheduler_cache, 0, alloc_size );
        my_head = 0;
        my_max_index = cache_capacity - 1;
    }

    void cleanup() {
        while (generic_scheduler* to_cleanup = pop()) {
            internal_scheduler_cleanup(to_cleanup);
        }
        NFS_Free(my_co_scheduler_cache);
    }

    //! Insert a scheduler into the cache, evicting the previous occupant of the head slot if any.
    void push(generic_scheduler* s) {
        generic_scheduler* to_cleanup = NULL;
        // Check if we are replacing an existing cache entry.
        if (my_co_scheduler_cache[my_head] != NULL) {
            to_cleanup = my_co_scheduler_cache[my_head];
        }
        // Store the cached value and advance the head index.
        my_co_scheduler_cache[my_head] = s;
        my_head = next_index();
        // Clean up the evicted scheduler, if any.
        if (to_cleanup != NULL) {
            internal_scheduler_cleanup(to_cleanup);
        }
    }

    //! Get a cached scheduler if any, NULL otherwise.
    generic_scheduler* pop() {
        if (internal_empty())
            return NULL;
        // Step back to the most recently cached entry and hand it out.
        my_head = prev_index();
        generic_scheduler* to_return = my_co_scheduler_cache[my_head];
        my_co_scheduler_cache[my_head] = NULL;
        return to_return;
    }
};
#endif // __TBB_PREVIEW_RESUMABLE_TASKS
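The class above is a fixed-capacity circular cache: push() overwrites the slot at my_head, evicting whatever scheduler was parked there, and pop() steps back to hand out the most recently cached entry. Below is a minimal standalone sketch of the same eviction discipline, assuming an arbitrary payload type; std::mutex stands in for whatever synchronization the real cache uses (elided from this listing), and all names are illustrative, not part of TBB.

// Sketch only: a fixed-capacity circular cache with "overwrite the head slot and
// evict its previous occupant" semantics, mirroring arena_co_cache above.
#include <cstddef>
#include <mutex>
#include <vector>

template <typename T>
class circular_cache {
    std::mutex my_mutex;
    std::vector<T*> my_slots;    // fixed-size ring of cached pointers
    std::size_t my_head = 0;     // next slot to fill

    std::size_t next_index() const { return my_head + 1 == my_slots.size() ? 0 : my_head + 1; }
    std::size_t prev_index() const { return my_head == 0 ? my_slots.size() - 1 : my_head - 1; }

public:
    explicit circular_cache(std::size_t capacity) : my_slots(capacity, nullptr) {}

    // Store `item`; if the head slot is occupied, return the evicted pointer so the
    // caller can clean it up afterwards (cf. internal_scheduler_cleanup above).
    T* push(T* item) {
        std::lock_guard<std::mutex> lock(my_mutex);
        T* evicted = my_slots[my_head];
        my_slots[my_head] = item;
        my_head = next_index();
        return evicted;
    }

    // Return the most recently cached item, or nullptr if the cache is empty.
    T* pop() {
        std::lock_guard<std::mutex> lock(my_mutex);
        if (my_slots[prev_index()] == nullptr)
            return nullptr;
        my_head = prev_index();
        T* item = my_slots[my_head];
        my_slots[my_head] = nullptr;
        return item;
    }
};

Returning the evicted pointer from push() mirrors the structure of arena_co_cache::push, which first updates the buffer and only then performs the potentially expensive scheduler cleanup.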
// arena_base data members (the structure of an arena, except the array of slots):

#if __TBB_TASK_PRIORITY
    volatile intptr_t my_top_priority;      // highest priority of tasks in the arena
#endif
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
    // ...
#endif
#if __TBB_PREVIEW_CRITICAL_TASKS
    // ...
#endif
#if __TBB_ARENA_OBSERVER
    observer_list my_observers;             // local observers attached to this arena
#endif
#if __TBB_NUMA_SUPPORT
    // ...
#endif
#if __TBB_TASK_PRIORITY
    intptr_t my_bottom_priority;            // lowest priority of tasks in the arena
    uintptr_t my_reload_epoch;
    task* my_orphaned_tasks;                // tasks abandoned by workers leaving the arena
    tbb::atomic<uintptr_t> my_abandonment_epoch;
    tbb::atomic<intptr_t> my_skipped_fifo_priority;
#endif
#if !__TBB_FP_CONTEXT
    // ...
#endif
#if __TBB_TASK_GROUP_CONTEXT
    // ...
#endif
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    bool my_local_concurrency_mode;         // arena-local mandatory concurrency is enabled
    bool my_global_concurrency_mode;        // market-wide mandatory concurrency is enabled
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
    arena_co_cache my_co_cache;             // cache of schedulers released by resumable tasks
#endif

void restore_priority_if_need();
// arena member functions and constants (class arena, base_type = padded<arena_base>):

    //! Constructor.
    arena ( market&, unsigned max_num_workers, unsigned num_reserved_slots );

    //! Allocate an instance of arena.
    static arena& allocate_arena( market&, unsigned num_slots, unsigned num_reserved_slots );

    static unsigned num_arena_slots ( unsigned num_slots ) {
        return max(2u, num_slots);
    }

    //! Get reference to mailbox corresponding to given affinity_id.
    mail_outbox& mailbox( affinity_id id ) {
        __TBB_ASSERT( 0 < id, "affinity id must be positive integer" );
        __TBB_ASSERT( id <= my_num_slots, "affinity id out of bounds" );
        // ...
    }
    //! No tasks to steal since the last snapshot was taken.
    static const pool_state_t SNAPSHOT_EMPTY = 0;
    //! At least one task has been offered for stealing since the last snapshot started.
    static const pool_state_t SNAPSHOT_FULL = pool_state_t(-1);

    //! The number of least significant bits of my_references reserved for external (master) references.
    static const unsigned ref_external_bits = 12;
    static const unsigned ref_external = 1;                     // reference increment for a master
    static const unsigned ref_worker = 1 << ref_external_bits;  // reference increment for a worker

    //! The number of workers active in the arena.
    unsigned num_workers_active() const {
        return my_references >> ref_external_bits;
    }

    //! Check if the recall is requested by the market.
    bool is_recall_requested() const {
        return num_workers_active() > my_num_workers_allotted;
    }
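Because ref_external = 1 and ref_worker = 1 << ref_external_bits, a single atomic word tracks both populations: the low 12 bits count master (external) references, everything above counts workers, and num_workers_active() is a plain shift. Below is a sketch of that packing using std::atomic; the names are illustrative, not the TBB types.

// Sketch only: packing master and worker reference counts into one atomic word,
// following the ref_external / ref_worker constants above.
#include <atomic>
#include <cassert>
#include <cstdint>

struct packed_refcount {
    static const unsigned ref_external_bits = 12;                  // low bits count masters
    static const std::uintptr_t ref_external = 1;                  // +1 per master reference
    static const std::uintptr_t ref_worker = std::uintptr_t(1) << ref_external_bits; // +1<<12 per worker

    std::atomic<std::uintptr_t> refs{0};

    void master_enter() { refs.fetch_add(ref_external); }
    void worker_enter() { refs.fetch_add(ref_worker); }

    // Equivalent of num_workers_active(): drop the master bits.
    unsigned workers_active() const { return unsigned(refs.load() >> ref_external_bits); }

    // Returns true when the caller removed the last reference of any kind,
    // i.e. the owner may now tear the structure down (cf. on_thread_leaving).
    bool leave(std::uintptr_t ref_param) {
        std::uintptr_t prev = refs.fetch_sub(ref_param);
        assert(prev >= ref_param && "broken reference counter");
        return prev == ref_param;
    }
};
// Usage: a master pairs master_enter() with leave(packed_refcount::ref_external);
// a worker pairs worker_enter() with leave(packed_refcount::ref_worker).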
    //! If necessary, raise a flag that there is new job in arena.
    template<arena::new_work_type work_type> void advertise_new_work();

    //! Check whether there is job anywhere in the arena; returns true if no work was found.
    bool is_out_of_work();

    //! Notification that worker or master leaves its arena.
    template<unsigned ref_param>
    inline void on_thread_leaving ( );

    void dump_arena_statistics ();
#if __TBB_TASK_PRIORITY
    //! Checks whether the given scheduler may still have tasks (sets tasks_present / dequeuing_possible).
    inline bool may_have_tasks ( generic_scheduler*, bool& tasks_present, bool& dequeuing_possible );
#endif

#if __TBB_COUNT_TASK_NODES
    intptr_t workers_task_node_count();
#endif

    bool has_enqueued_tasks();

    //! Sentinel value returned by occupy_free_slot* when no slot is available.
    static const size_t out_of_arena = ~size_t(0);

    //! Tries to occupy a slot in the arena; returns the slot index or out_of_arena.
    template <bool as_worker>
    size_t occupy_free_slot( generic_scheduler& s );

    //! Tries to occupy a free slot in the given range of slots.
    size_t occupy_free_slot_in_range( generic_scheduler& s, size_t lower, size_t upper );
template<unsigned ref_param>
inline void arena::on_thread_leaving ( ) {
    // ...
    market* m = my_market;
    uintptr_t aba_epoch = my_aba_epoch;
    __TBB_ASSERT(my_references >= ref_param, "broken arena reference counter");
#if __TBB_STATISTICS_EARLY_DUMP
    // ...
#endif
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    // ...
    if( ref_param==ref_external && my_num_slots != my_num_reserved_slots
        /* ... */ ) {
        // ...
        is_out = is_out_of_work();
        // ...
    }
#endif
    // Release our reference; the thread dropping the last one asks the market to
    // remove (and possibly destroy) the arena, passing the epoch captured above.
    if ( (my_references -= ref_param ) == 0 )
        m->try_destroy_arena( this, aba_epoch );
}
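Note that the market pointer and my_aba_epoch are read before the reference is released: the thread that drops the last reference calls try_destroy_arena() with the epoch it captured, so the market can recognize and ignore a request that refers to storage already recycled for a newer arena. A simplified sketch of this idiom, assuming a hypothetical registry type and std::atomic:

// Sketch only: releasing the last reference and asking the owning registry to
// destroy the object, guarded by an ABA epoch captured while the reference was
// still held. Hypothetical types; not the TBB implementation.
#include <atomic>
#include <cstdint>

struct ref_counted_node {
    std::atomic<std::uintptr_t> refs{1};
    std::uintptr_t aba_epoch = 0;   // bumped whenever this storage is reused
};

struct registry {
    // Destroy `node` only if it is still the incarnation the caller referenced;
    // a real implementation would also lock the registry and unlink the node.
    void try_destroy(ref_counted_node* node, std::uintptr_t expected_epoch) {
        if (node->aba_epoch == expected_epoch && node->refs.load() == 0)
            delete node;
    }
};

void on_thread_leaving(registry& owner, ref_counted_node* node, std::uintptr_t ref_param) {
    std::uintptr_t epoch = node->aba_epoch;            // capture before the decrement
    if (node->refs.fetch_sub(ref_param) == ref_param)  // we dropped the last reference
        owner.try_destroy(node, epoch);
}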
template<arena::new_work_type work_type>
void arena::advertise_new_work() {
    if( work_type == work_enqueued ) {
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
        if ( as_atomic(my_market->my_num_workers_soft_limit) == 0 &&
             as_atomic(my_global_concurrency_mode) == false )
            my_market->enable_mandatory_concurrency(this);

        if ( my_max_num_workers == 0 && my_num_reserved_slots == 1 ) {
            // Enable the arena-local mandatory concurrency mode.
            my_local_concurrency_mode = true;
            my_pool_state = SNAPSHOT_FULL;
            my_max_num_workers = 1;
            my_market->adjust_demand(*this, my_max_num_workers);
            return;
        }
#endif /* __TBB_ENQUEUE_ENFORCED_CONCURRENCY */
        // ...
    }
    else if( work_type == wakeup ) {
        __TBB_ASSERT(my_max_num_workers!=0, "Unexpected worker wakeup request");
        // ...
    }
    // ...
    pool_state_t snapshot = my_pool_state;
    if( is_busy_or_empty(snapshot) ) {
        // Attempt to mark the pool as full; only the thread that completes the
        // empty -> full transition becomes responsible for waking up workers.
        if( my_pool_state.compare_and_swap( SNAPSHOT_FULL, snapshot )==SNAPSHOT_EMPTY ) {
            if( snapshot!=SNAPSHOT_EMPTY ) {
                // The pool turned empty after the snapshot was taken; retry the
                // empty -> full transition explicitly.
                if( my_pool_state.compare_and_swap( SNAPSHOT_FULL, SNAPSHOT_EMPTY )!=SNAPSHOT_EMPTY ) {
                    // Another thread completed the transition and will wake up workers.
                    return;
                }
            }
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
            if( work_type == work_spawned ) {
                if( my_local_concurrency_mode ) {
                    // Leave the arena-local mandatory concurrency mode.
                    // ...
                    my_max_num_workers = 0;
                    my_local_concurrency_mode = false;
                }
                if ( as_atomic(my_global_concurrency_mode) == true )
                    my_market->mandatory_concurrency_disable( this );
            }
#endif /* __TBB_ENQUEUE_ENFORCED_CONCURRENCY */
            // This thread transitioned the pool to the full state and must tell
            // the market that there is work to do.
            my_market->adjust_demand( *this, my_max_num_workers );
        }
    }
}
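The double compare_and_swap above guarantees that exactly one advertiser completes the empty-to-full transition of my_pool_state and therefore calls adjust_demand(); every other concurrent caller observes a non-empty old value and returns without touching the market. Below is a reduced sketch of that protocol using std::atomic::compare_exchange_strong; it is illustrative, not the TBB implementation.

// Sketch only: the snapshot/wakeup protocol of advertise_new_work(), reduced to a
// single function. Exactly one concurrent advertiser "wins" the transition to
// SNAPSHOT_FULL and must request workers.
#include <atomic>
#include <cstdint>

using pool_state_t = std::uintptr_t;
const pool_state_t SNAPSHOT_EMPTY = 0;
const pool_state_t SNAPSHOT_FULL  = pool_state_t(-1);

inline bool is_busy_or_empty(pool_state_t s) { return s != SNAPSHOT_FULL; }

// Returns true if the caller completed the empty -> full transition and is
// therefore responsible for notifying the resource manager.
bool advertise(std::atomic<pool_state_t>& pool_state) {
    pool_state_t snapshot = pool_state.load();
    if (!is_busy_or_empty(snapshot))
        return false;                          // already advertised as full

    // First attempt: replace whatever we saw (busy or empty) with FULL.
    pool_state_t observed = snapshot;
    pool_state.compare_exchange_strong(observed, SNAPSHOT_FULL);
    if (observed != SNAPSHOT_EMPTY)
        return false;                          // pool was busy or full; no wakeup needed from us

    if (snapshot != SNAPSHOT_EMPTY) {
        // We read "busy", but the pool became empty meanwhile (so the first CAS
        // failed). Retry the empty -> full transition explicitly.
        pool_state_t expected = SNAPSHOT_EMPTY;
        if (!pool_state.compare_exchange_strong(expected, SNAPSHOT_FULL))
            return false;                      // another advertiser won the race
    }
    return true;                               // we flipped empty -> full: wake workers
}
// Usage after publishing work: if (advertise(my_pool_state)) request_workers();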
Symbols referenced in the listing above (Doxygen cross-reference):

uintptr_t my_aba_epoch
    ABA prevention marker.
unsigned my_num_slots
    The number of slots in the arena.
unsigned my_num_reserved_slots
    The number of reserved slots (can be occupied only by masters).
new_work_type
    Types of work advertised by advertise_new_work().
tbb::atomic< uintptr_t > my_pool_state
    Current task pool state and estimate of available tasks amount.
static generic_scheduler * local_scheduler_if_initialized()
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
    Allocate memory on cache/sector line boundary.
atomic< T > & as_atomic(T &t)
padded< T >
    Pads type T to fill out to a multiple of cache line size.
unsigned my_max_num_workers
    The number of workers requested by the master thread owning the arena.
static unsigned num_arena_slots(unsigned num_slots)
static int allocation_size(unsigned num_slots)
void advertise_new_work()
    If necessary, raise a flag that there is new job in arena.
concurrent_monitor my_exit_monitors
    Waiting object for master threads that cannot join the arena.
unsigned num_workers_active() const
    The number of workers active in the arena.
void on_thread_leaving()
    Notification that worker or master leaves its arena.
static void cleanup_worker(void *arg, bool worker)
    Perform necessary cleanup when a worker thread finishes.
mail_outbox
    Class representing where mail is put.
void try_destroy_arena(arena *, uintptr_t aba_epoch)
    Removes the arena from the market's list.
void atomic_fence()
    Sequentially consistent full memory fence.
task_stream< num_priority_levels > my_task_stream
    Task pool for the tasks scheduled via task::enqueue() method.
mail_outbox & mailbox(affinity_id id)
    Get reference to mailbox corresponding to given affinity_id.
static generic_scheduler * local_scheduler()
    Obtain the thread-local instance of the TBB scheduler.
spin_mutex
    A lock that occupies a single byte.
static bool is_busy_or_empty(pool_state_t s)
    No tasks to steal or a snapshot is being taken.
arena_base
    The structure of an arena, except the array of slots.
task_stream
    The container for "fairness-oriented" aka "enqueued" tasks.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
    Free memory allocated by NFS_Allocate.
T max(const T &val1, const T &val2)
    Utility template function returning the greater of two values.
unsigned short affinity_id
    An id as used for specifying affinity.
bool is_recall_requested() const
    Check if the recall is requested by the market.
argument_integer_type modulo_power_of_two(argument_integer_type arg, divisor_integer_type divisor)
    A function to compute arg modulo divisor where divisor is a power of 2.
padded< arena_base > base_type
unsigned my_num_workers_soft_limit
    Current application-imposed limit on the number of workers (see set_active_num_workers()) ...
static void assume_scheduler(generic_scheduler *s)
    Temporarily set TLS slot to the given scheduler.
spin_mutex::scoped_lock
    Represents acquisition of a mutex.
task_group_context
    Used to form groups of tasks.
atomic< unsigned > my_limit
    The maximal number of currently busy slots.
FastRandom
    A fast random number generator.
#define __TBB_ASSERT(predicate, comment)
    No-op version of __TBB_ASSERT.
unsigned my_num_workers_allotted
    The number of workers that have been marked out by the resource manager to service the arena...
market * my_market
    The market that owns this arena.
int my_num_workers_requested
    The number of workers that are currently requested from the resource manager.
task
    Base class for user-defined tasks.
static const intptr_t num_priority_levels
generic_scheduler
    Work stealing task scheduler.
#define GATHER_STATISTIC(x)
atomic< unsigned > my_references
    Reference counter for the arena.