Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
arena.h
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef _TBB_arena_H
18 #define _TBB_arena_H
19 
20 #include "tbb/tbb_stddef.h"
21 #include "tbb/atomic.h"
22 
23 #include "tbb/tbb_machine.h"
24 
25 #include "scheduler_common.h"
26 #include "intrusive_list.h"
27 #if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
28 #include "task_stream_extended.h"
29 #else
30 #include "task_stream.h"
31 #endif
32 #include "../rml/include/rml_tbb.h"
33 #include "mailbox.h"
34 #include "observer_proxy.h"
35 #include "market.h"
36 #include "governor.h"
37 #include "concurrent_monitor.h"
38 
39 #if __TBB_PREVIEW_RESUMABLE_TASKS
40 #include "tbb/spin_mutex.h"
41 #endif
42 
43 namespace tbb {
44 
45 class task_group_context;
46 class allocate_root_with_context_proxy;
47 
48 namespace internal {
49 
50 #if __TBB_PREVIEW_RESUMABLE_TASKS
51 class arena_co_cache {
54  generic_scheduler** my_co_scheduler_cache;
56  unsigned my_head;
58  unsigned my_max_index;
60  tbb::spin_mutex my_co_cache_mutex;
61 
62  unsigned next_index() {
63  return ( my_head == my_max_index ) ? 0 : my_head + 1;
64  }
65 
66  unsigned prev_index() {
67  return ( my_head == 0 ) ? my_max_index : my_head - 1;
68  }
69 
70  bool internal_empty() {
71  return my_co_scheduler_cache[prev_index()] == NULL;
72  }
73 
74  void internal_scheduler_cleanup(generic_scheduler* to_cleanup) {
75  to_cleanup->my_arena_slot = NULL;
76  // Needed by cleanup_worker function, as well as arena slot clearing
77  governor::assume_scheduler(to_cleanup);
78  generic_scheduler::cleanup_worker(to_cleanup, true);
79  }
80 
81 public:
82  void init(unsigned cache_capacity) {
83  size_t alloc_size = cache_capacity * sizeof(generic_scheduler*);
84  my_co_scheduler_cache = (generic_scheduler**)NFS_Allocate(1, alloc_size, NULL);
85  memset( my_co_scheduler_cache, 0, alloc_size );
86  my_head = 0;
87  my_max_index = cache_capacity - 1;
88  }
89 
90  void cleanup() {
91  generic_scheduler* current = governor::local_scheduler_if_initialized();
92  while (generic_scheduler* to_cleanup = pop()) {
93  internal_scheduler_cleanup(to_cleanup);
94  }
95  governor::assume_scheduler(current);
96  NFS_Free(my_co_scheduler_cache);
97  }
98 
101  void push(generic_scheduler* s) {
102  generic_scheduler* to_cleanup = NULL;
103  {
104  tbb::spin_mutex::scoped_lock lock(my_co_cache_mutex);
105  // Check if we are replacing an existing buffer entry
106  if (my_co_scheduler_cache[my_head] != NULL) {
107  to_cleanup = my_co_scheduler_cache[my_head];
108  }
109  // Store the cached value
110  my_co_scheduler_cache[my_head] = s;
111  // Move head index to the next slot
112  my_head = next_index();
113  }
114  // Cleanup replaced buffer if any
115  if (to_cleanup) {
116  generic_scheduler* current = governor::local_scheduler_if_initialized();
117  internal_scheduler_cleanup(to_cleanup);
118  governor::assume_scheduler(current);
119  }
120  }
121 
123  generic_scheduler* pop() {
124  tbb::spin_mutex::scoped_lock lock(my_co_cache_mutex);
125  // No cached coroutine
126  if (internal_empty()) return NULL;
127  // Move head index to the currently available value
128  my_head = prev_index();
129  // Retrieve the value from the buffer
130  generic_scheduler* to_return = my_co_scheduler_cache[my_head];
131  // Clear the previous entrance value
132  my_co_scheduler_cache[my_head] = NULL;
133  return to_return;
134  }
135 };
136 #endif // __TBB_PREVIEW_RESUMABLE_TASKS
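The arena_co_cache above is a fixed-capacity circular buffer used as a LIFO cache of schedulers: push() stores at the head (evicting and cleaning up whatever entry it overwrites) and pop() returns the most recently stored entry. The following standalone sketch illustrates the same buffer discipline with placeholder names (Coroutine, CircularLifoCache) and std::mutex in place of TBB internals; it is an illustration of the pattern, not TBB code.

    #include <mutex>
    #include <vector>

    // Illustrative stand-in for the cached object (generic_scheduler in the listing above).
    struct Coroutine { int id; };

    class CircularLifoCache {
        std::vector<Coroutine*> slots_;   // fixed-capacity ring buffer; nullptr marks an empty slot
        unsigned head_ = 0;               // next position to write
        std::mutex mutex_;

        unsigned next(unsigned i) const { return (i + 1 == slots_.size()) ? 0 : i + 1; }
        unsigned prev(unsigned i) const { return (i == 0) ? unsigned(slots_.size()) - 1 : i - 1; }

    public:
        explicit CircularLifoCache(unsigned capacity) : slots_(capacity, nullptr) {}

        // Store an entry and return whatever entry was evicted to make room (if any),
        // so the caller can clean it up outside the lock, as arena_co_cache::push does.
        Coroutine* push(Coroutine* c) {
            std::lock_guard<std::mutex> lock(mutex_);
            Coroutine* evicted = slots_[head_];
            slots_[head_] = c;
            head_ = next(head_);
            return evicted;
        }

        // Return the most recently pushed entry, or nullptr if the cache is empty.
        Coroutine* pop() {
            std::lock_guard<std::mutex> lock(mutex_);
            unsigned last = prev(head_);
            if (slots_[last] == nullptr) return nullptr;  // nothing cached
            head_ = last;
            Coroutine* c = slots_[head_];
            slots_[head_] = nullptr;
            return c;
        }
    };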
137 
139 //! The structure of an arena, except the array of slots.
141 struct arena_base : padded<intrusive_list_node> {
143  unsigned my_num_workers_allotted; // heavy use in stealing loop
144 
146 
148  //! Reference counter for the arena.
149  atomic<unsigned> my_references; // heavy use in stealing loop
150 
151 #if __TBB_TASK_PRIORITY
152  volatile intptr_t my_top_priority; // heavy use in stealing loop
154 #endif /* !__TBB_TASK_PRIORITY */
155 
156  //! The maximal number of currently busy slots.
157  atomic<unsigned> my_limit; // heavy use in stealing loop
158 
160 
161  //! Task pool for the tasks scheduled via task::enqueue() method.
165 #if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
166  task_stream<num_priority_levels, front_accessor> my_task_stream; // heavy use in stealing loop
167 #else
168  task_stream<num_priority_levels> my_task_stream; // heavy use in stealing loop
169 #endif
170 
171 #if __TBB_PREVIEW_CRITICAL_TASKS
172 
176  // used on the hot path of the task dispatch loop
177  task_stream<1, back_nonnull_accessor> my_critical_task_stream;
178 #endif
179 
180  //! The number of workers requested by the master thread owning the arena.
181  unsigned my_max_num_workers;
182 
183  //! The number of workers that are currently requested from the resource manager.
184  int my_num_workers_requested;
185 
186  //! Current task pool state and estimate of available tasks amount.
191  tbb::atomic<uintptr_t> my_pool_state;
192 
193 #if __TBB_ARENA_OBSERVER
194  observer_list my_observers;
196 #endif
197 
198 #if __TBB_NUMA_SUPPORT
199  task_scheduler_observer* my_numa_binding_observer;
201 #endif /*__TBB_NUMA_SUPPORT*/
202 
203 #if __TBB_TASK_PRIORITY
204  intptr_t my_bottom_priority;
206 
208 
210  uintptr_t my_reload_epoch;
211 
213  task* my_orphaned_tasks;
214 
216  tbb::atomic<uintptr_t> my_abandonment_epoch;
217 
219 
222  tbb::atomic<intptr_t> my_skipped_fifo_priority;
223 #endif /* !__TBB_TASK_PRIORITY */
224 
225  // Below are rarely modified members
226 
227  //! The market that owns this arena.
228  market* my_market;
229 
230  //! ABA prevention marker.
231  uintptr_t my_aba_epoch;
232 
233 #if !__TBB_FP_CONTEXT
234  cpu_ctl_env my_cpu_ctl_env;
236 #endif
237 
238 #if __TBB_TASK_GROUP_CONTEXT
239 
242  task_group_context* my_default_ctx;
243 #endif /* __TBB_TASK_GROUP_CONTEXT */
244 
245  //! The number of slots in the arena.
246  unsigned my_num_slots;
247 
248  //! The number of reserved slots (can be occupied only by masters).
249  unsigned my_num_reserved_slots;
250 
251 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
252  // arena needs an extra worker despite the arena limit
253  bool my_local_concurrency_mode;
254  // arena needs an extra worker despite a global limit
255  bool my_global_concurrency_mode;
256 #endif /* __TBB_ENQUEUE_ENFORCED_CONCURRENCY */
257 
258  //! Waiting object for master threads that cannot join the arena.
259  concurrent_monitor my_exit_monitors;
260 
261 #if __TBB_PREVIEW_RESUMABLE_TASKS
262  arena_co_cache my_co_cache;
264 #endif
265 
266 #if TBB_USE_ASSERT
267  uintptr_t my_guard;
269 #endif /* TBB_USE_ASSERT */
270 }; // struct arena_base
271 
272 class arena: public padded<arena_base>
273 {
275  void restore_priority_if_need();
276 public:
277  typedef padded<arena_base> base_type;
278 
279  //! Types of work advertised by advertise_new_work()
280  enum new_work_type {
281  work_spawned,
282  wakeup,
283  work_enqueued
284  };
285 
287  arena ( market&, unsigned max_num_workers, unsigned num_reserved_slots );
288 
290  static arena& allocate_arena( market&, unsigned num_slots, unsigned num_reserved_slots );
291 
292  static int unsigned num_arena_slots ( unsigned num_slots ) {
293  return max(2u, num_slots);
294  }
295 
296  static int allocation_size ( unsigned num_slots ) {
297  return sizeof(base_type) + num_slots * (sizeof(mail_outbox) + sizeof(arena_slot));
298  }
299 
300  //! Get reference to mailbox corresponding to given affinity_id.
301  mail_outbox& mailbox( affinity_id id ) {
302  __TBB_ASSERT( 0<id, "affinity id must be positive integer" );
303  __TBB_ASSERT( id <= my_num_slots, "affinity id out of bounds" );
304 
305  return ((mail_outbox*)this)[-(int)id];
306  }
307 
309  void free_arena ();
310 
311  typedef uintptr_t pool_state_t;
312 
314  static const pool_state_t SNAPSHOT_EMPTY = 0;
315 
317  static const pool_state_t SNAPSHOT_FULL = pool_state_t(-1);
318 
320  static const unsigned ref_external_bits = 12; // up to 4095 external and 1M workers
321 
323  static const unsigned ref_external = 1;
324  static const unsigned ref_worker = 1<<ref_external_bits;
325 
327  static bool is_busy_or_empty( pool_state_t s ) { return s < SNAPSHOT_FULL; }
328 
329  //! The number of workers active in the arena.
330  unsigned num_workers_active() const {
331  return my_references >> ref_external_bits;
332  }
333 
334  //! Check if the recall is requested by the market.
335  bool is_recall_requested() const {
336  return num_workers_active() > my_num_workers_allotted;
337  }
338 
339  //! If necessary, raise a flag that there is new job in arena.
340  template<arena::new_work_type work_type> void advertise_new_work();
341 
343 
344  bool is_out_of_work();
345 
347  void enqueue_task( task&, intptr_t, FastRandom & );
348 
350  void process( generic_scheduler& );
351 
352  //! Notification that worker or master leaves its arena.
353  template<unsigned ref_param>
354  inline void on_thread_leaving ( );
355 
356 #if __TBB_STATISTICS
357  void dump_arena_statistics ();
359 #endif /* __TBB_STATISTICS */
360 
361 #if __TBB_TASK_PRIORITY
362 
364  inline bool may_have_tasks ( generic_scheduler*, bool& tasks_present, bool& dequeuing_possible );
365 
367  void orphan_offloaded_tasks ( generic_scheduler& s );
368 #endif /* __TBB_TASK_PRIORITY */
369 
370 #if __TBB_COUNT_TASK_NODES
371  intptr_t workers_task_node_count();
373 #endif
374 
376  bool has_enqueued_tasks();
377 
378  static const size_t out_of_arena = ~size_t(0);
380  template <bool as_worker>
381  size_t occupy_free_slot( generic_scheduler& s );
383  size_t occupy_free_slot_in_range( generic_scheduler& s, size_t lower, size_t upper );
384 
386  arena_slot my_slots[1];
387 }; // class arena
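Taken together, allocation_size(), mailbox(), and the trailing my_slots[1] member describe a single-block memory layout: num_slots mail_outbox objects sit immediately before the arena object (hence the negative index in mailbox()), and num_slots arena_slot objects follow it. The sketch below reproduces that layout with simplified, hypothetical types (Outbox, Slot, PoolBase, Pool); it ignores the cache-line alignment TBB obtains from NFS_Allocate and is not the actual allocate_arena() implementation, which lives in arena.cpp.

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>
    #include <new>

    // Hypothetical, simplified stand-ins for mail_outbox, arena_slot, and arena_base.
    struct Outbox   { int data; };
    struct Slot     { int data; };
    struct PoolBase { unsigned num_slots; };

    struct Pool : PoolBase {
        Slot slots[1];   // size-one trailing array, like arena::my_slots[1]

        // Mailboxes live immediately *before* this object and are indexed 1..num_slots,
        // mirroring arena::mailbox(): ((mail_outbox*)this)[-(int)id].
        Outbox& mailbox(unsigned id) { return ((Outbox*)this)[-(int)id]; }

        // Mirrors arena::allocation_size(): the base object plus per-slot mailbox and slot storage.
        static std::size_t allocation_size(unsigned num_slots) {
            return sizeof(PoolBase) + num_slots * (sizeof(Outbox) + sizeof(Slot));
        }

        static Pool* create(unsigned num_slots) {
            // One block: [ Outbox x num_slots | PoolBase | Slot x num_slots ]
            char* storage = (char*)std::malloc(allocation_size(num_slots));
            std::memset(storage, 0, allocation_size(num_slots));
            Pool* p = new (storage + num_slots * sizeof(Outbox)) Pool;
            p->num_slots = num_slots;
            return p;
        }

        static void destroy(Pool* p) {
            // The allocation starts num_slots mailboxes before the Pool object itself.
            std::free((char*)p - p->num_slots * sizeof(Outbox));
        }
    };

With this layout, p->mailbox(id) for id in 1..num_slots reaches backwards into the header region, just as arena::mailbox() does with affinity ids, while the slot array extends past the end of the object in the pre-C++11 size-one trailing array idiom.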
388 
389 template<unsigned ref_param>
390 inline void arena::on_thread_leaving ( ) {
391  //
392  // Implementation of arena destruction synchronization logic contained various
393  // bugs/flaws at the different stages of its evolution, so below is a detailed
394  // description of the issues taken into consideration in the framework of the
395  // current design.
396  //
397  // When fire-and-forget tasks (scheduled via task::enqueue()) are used, the
398  // master thread is allowed to leave its arena before all its work is executed,
399  // and the market may temporarily revoke all workers from this arena. Since revoked
400  // workers never attempt to reset the arena state to EMPTY and cancel its request
401  // to RML for threads, the arena object is destroyed only when both the last
402  // thread is leaving it and the arena's state is EMPTY (that is, its master thread
403  // left and it does not contain any work).
404  // Thus resetting the arena to the EMPTY state (as earlier TBB versions did) should not
405  // be done here (or anywhere else in the master thread, for that matter); doing so
406  // can result either in the arena's premature destruction (at least without
407  // additional costly checks in workers) or in unnecessary arena state changes
408  // (and the ensuing worker migration).
409  //
410  // A worker that checks for work presence and transitions the arena to the EMPTY
411  // state (in the snapshot-taking procedure arena::is_out_of_work()) updates
412  // arena::my_pool_state first and only then arena::my_num_workers_requested.
413  // So the check for work absence must be done against the latter field.
414  //
415  // In the time window between decrementing the active threads count and checking
416  // whether there is an outstanding request for workers, a new worker thread may
417  // arrive, finish the remaining work, set the arena state to empty, and leave,
418  // decrementing the refcount and destroying the arena. The current thread would then
419  // destroy the arena a second time. To preclude this, a local copy of the outstanding
420  // request value can be stored before decrementing the active threads count.
421  //
422  // But this technique may cause two other problems. When the stored request is
423  // zero, it is possible that the arena still has threads, and they can generate new
424  // tasks and thus re-establish non-zero requests. Then all the threads can be
425  // revoked (as described above), leaving this thread the last one and causing
426  // it to destroy a non-empty arena.
427  //
428  // The other problem arises when the stored request is non-zero. Another
429  // thread may complete the work, set the arena state to empty, and leave without
430  // destroying the arena before this thread decrements the refcount. This thread
431  // cannot destroy the arena either. Thus the arena may be "orphaned".
432  //
433  // In both cases we cannot dereference arena pointer after the refcount is
434  // decremented, as our arena may already be destroyed.
435  //
436  // If this is the master thread, the market is kept alive by this thread's reference to it.
437  // In the case of workers, the market's liveness is ensured by the RML connection
438  // rundown protocol, according to which the client (i.e. the market) lives
439  // until RML server notifies it about connection termination, and this
440  // notification is fired only after all workers return into RML.
441  //
442  // Thus, if we have decremented the refcount to zero, we ask the market to check the
443  // arena's state (including whether it is still alive) under its lock.
444  //
445  uintptr_t aba_epoch = my_aba_epoch;
446  market* m = my_market;
447  __TBB_ASSERT(my_references >= ref_param, "broken arena reference counter");
448 #if __TBB_STATISTICS_EARLY_DUMP
449  // While still holding a reference to the arena, compute how many external references are left.
450  // If just one, dump statistics.
451  if ( modulo_power_of_two(my_references,ref_worker)==ref_param ) // may only be true with ref_external
452  GATHER_STATISTIC( dump_arena_statistics() );
453 #endif
454 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
455  // When there are no workers, someone must free the arena, because
456  // without workers no one calls is_out_of_work().
457  // Skip workerless arenas because they have no demand for workers.
458  // TODO: consider stricter conditions for the cleanup,
459  // because it can create demand for workers,
460  // but the arena may already be empty (and so ready for destruction).
461  // TODO: fix the race: the soft limit may change while we are checking it.
462  if( ref_param==ref_external && my_num_slots != my_num_reserved_slots
463  && 0 == m->my_num_workers_soft_limit && !my_global_concurrency_mode ) {
464  bool is_out = false;
465  for (int i=0; i<num_priority_levels; i++) {
466  is_out = is_out_of_work();
467  if (is_out)
468  break;
469  }
470  // We expect that in the worst case it is enough to have num_priority_levels-1
471  // calls to restore priorities, plus one more is_out_of_work() call to confirm
472  // that no work was found. But as market::set_active_num_workers() can be called
473  // concurrently, we cannot guarantee that the last is_out_of_work() returns true.
474  }
475 #endif
476  if ( (my_references -= ref_param ) == 0 )
477  m->try_destroy_arena( this, aba_epoch );
478 }
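The essence of the scheme described in the comment above is: capture everything needed for the final step (the owning market and the ABA epoch) before the refcount decrement, never touch the arena after the decrement, and let the owner re-validate everything under its own lock. A condensed sketch of that pattern follows, with hypothetical names (Object, Registry, release, try_destroy); it is not TBB's market/arena code, just an illustration of the synchronization idea.

    #include <atomic>
    #include <cstdint>
    #include <mutex>
    #include <set>

    // Hypothetical names illustrating the pattern only; not the actual market/arena code.
    struct Registry;

    struct Object {
        std::atomic<unsigned> refs{1};
        std::uintptr_t aba_epoch = 0;    // bumped whenever the object is (re)registered
        Registry* owner = nullptr;
        bool has_work = false;
    };

    struct Registry {
        std::mutex lock;
        std::set<Object*> live;

        // Destroy the object only if it is still the same registered instance (epoch check)
        // and really has nothing left to do; everything is re-validated under the lock.
        void try_destroy(Object* o, std::uintptr_t epoch) {
            std::unique_lock<std::mutex> guard(lock);
            if (!live.count(o) || o->aba_epoch != epoch) return;  // already gone or recycled
            if (o->refs.load() != 0 || o->has_work) return;       // re-attached or work appeared
            live.erase(o);
            guard.unlock();
            delete o;
        }
    };

    void release(Object* o, unsigned ref_param) {
        // Capture everything needed *before* the decrement: after it, `o` may already have
        // been destroyed by another thread and must not be dereferenced.
        std::uintptr_t epoch = o->aba_epoch;
        Registry* r = o->owner;
        if (o->refs.fetch_sub(ref_param) == ref_param)   // we dropped the last reference
            r->try_destroy(o, epoch);
    }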
479 
480 template<arena::new_work_type work_type> void arena::advertise_new_work() {
481  if( work_type == work_enqueued ) {
482 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
483  if ( as_atomic(my_market->my_num_workers_soft_limit) == 0 && as_atomic(my_global_concurrency_mode) == false )
484  my_market->enable_mandatory_concurrency(this);
485 
486  if ( my_max_num_workers == 0 && my_num_reserved_slots == 1 ) {
487  __TBB_ASSERT(!my_local_concurrency_mode, NULL);
488  my_local_concurrency_mode = true;
489  my_pool_state = SNAPSHOT_FULL;
490  my_max_num_workers = 1;
491  my_market->adjust_demand(*this, my_max_num_workers);
492  return;
493  }
494 #endif /* __TBB_ENQUEUE_ENFORCED_CONCURRENCY */
495  // Local memory fence here and below is required to avoid missed wakeups; see the comment below.
496  // Starvation resistant tasks require concurrency, so missed wakeups are unacceptable.
497  atomic_fence();
498  }
499  else if( work_type == wakeup ) {
500  __TBB_ASSERT(my_max_num_workers!=0, "Unexpected worker wakeup request");
501  atomic_fence();
502  }
503  // Double-check idiom that, in case of spawning, is deliberately sloppy about memory fences.
504  // Technically, to avoid missed wakeups, there should be a full memory fence between the point we
505  // released the task pool (i.e. spawned task) and read the arena's state. However, adding such a
506  // fence might hurt overall performance more than it helps, because the fence would be executed
507  // on every task pool release, even when stealing does not occur. Since TBB allows parallelism,
508  // but never promises parallelism, the missed wakeup is not a correctness problem.
509  pool_state_t snapshot = my_pool_state;
510  if( is_busy_or_empty(snapshot) ) {
511  // Attempt to mark as full. The compare_and_swap below is a little unusual because the
512  // result is compared to a value that can be different from the comparand argument.
513  if( my_pool_state.compare_and_swap( SNAPSHOT_FULL, snapshot )==SNAPSHOT_EMPTY ) {
514  if( snapshot!=SNAPSHOT_EMPTY ) {
515  // This thread read "busy" into snapshot, and then another thread transitioned
516  // my_pool_state to "empty" in the meantime, which caused the compare_and_swap above
517  // to fail. Attempt to transition my_pool_state from "empty" to "full".
518  if( my_pool_state.compare_and_swap( SNAPSHOT_FULL, SNAPSHOT_EMPTY )!=SNAPSHOT_EMPTY ) {
519  // Some other thread transitioned my_pool_state from "empty", and hence became
520  // responsible for waking up workers.
521  return;
522  }
523  }
524  // This thread transitioned pool from empty to full state, and thus is responsible for
525  // telling the market that there is work to do.
526 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
527  if( work_type == work_spawned ) {
528  if( my_local_concurrency_mode ) {
529  __TBB_ASSERT(my_max_num_workers==1, "");
530  __TBB_ASSERT(!governor::local_scheduler()->is_worker(), "");
531  // There was deliberate oversubscription on 1 core for the sake of starvation-resistant tasks.
532  // Now a single active thread (must be the master) supposedly starts a new parallel region
533  // with relaxed sequential semantics, and oversubscription should be avoided.
534  // Demand for workers has been decreased to 0 during SNAPSHOT_EMPTY, so just keep it.
535  my_max_num_workers = 0;
536  my_local_concurrency_mode = false;
537  return;
538  }
539  if ( as_atomic(my_global_concurrency_mode) == true )
540  my_market->mandatory_concurrency_disable( this );
541  }
542 #endif /* __TBB_ENQUEUE_ENFORCED_CONCURRENCY */
543  // TODO: investigate adjusting of arena's demand by a single worker.
544  my_market->adjust_demand( *this, my_max_num_workers );
545  }
546  }
547 }
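The pool-state logic above boils down to a small state machine over SNAPSHOT_EMPTY, SNAPSHOT_FULL, and intermediate "busy" values, where exactly one advertiser must win the EMPTY-to-FULL transition and request workers. The condensed sketch below uses std::atomic and illustrative names (pool_state, request_workers, advertise_new_work_sketch); it mirrors the branching of advertise_new_work() but is not the TBB implementation.

    #include <atomic>
    #include <cstdint>

    // Illustrative names only; mirrors the branching of advertise_new_work() above.
    using pool_state_t = std::uintptr_t;
    static const pool_state_t SNAPSHOT_EMPTY = 0;                 // no work; workers may be recalled
    static const pool_state_t SNAPSHOT_FULL  = pool_state_t(-1);  // work already advertised
    // Any other value means "busy": a worker is in the middle of taking a snapshot.

    static std::atomic<pool_state_t> pool_state{SNAPSHOT_EMPTY};

    static void request_workers() { /* stands in for asking the market for workers */ }

    void advertise_new_work_sketch() {
        pool_state_t snapshot = pool_state.load();
        if (snapshot == SNAPSHOT_FULL) return;            // somebody already woke the workers
        // Try to claim responsibility for waking workers by flipping the state to FULL.
        pool_state_t expected = snapshot;
        if (pool_state.compare_exchange_strong(expected, SNAPSHOT_FULL)) {
            if (snapshot == SNAPSHOT_EMPTY)
                request_workers();                        // we took EMPTY -> FULL: wake the workers
            // A successful busy -> FULL transition needs no wakeup: the snapshot taker is
            // still active and will observe the FULL state itself.
            return;
        }
        // The CAS failed, so the state changed under us. The only change that still leaves
        // the wakeup to this thread is busy -> EMPTY, so retry the EMPTY -> FULL transition.
        expected = SNAPSHOT_EMPTY;
        if (snapshot != SNAPSHOT_EMPTY &&
            pool_state.compare_exchange_strong(expected, SNAPSHOT_FULL))
            request_workers();
    }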
548 
549 } // namespace internal
550 } // namespace tbb
551 
552 #endif /* _TBB_arena_H */
uintptr_t my_aba_epoch
ABA prevention marker.
Definition: arena.h:231
unsigned my_num_slots
The number of slots in the arena.
Definition: arena.h:246
unsigned my_num_reserved_slots
The number of reserved slots (can be occupied only by masters).
Definition: arena.h:249
new_work_type
Types of work advertised by advertise_new_work()
Definition: arena.h:280
tbb::atomic< uintptr_t > my_pool_state
Current task pool state and estimate of available tasks amount.
Definition: arena.h:191
static generic_scheduler * local_scheduler_if_initialized()
Definition: governor.h:139
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
atomic< T > & as_atomic(T &t)
Definition: atomic.h:572
padded
Pads type T to fill out to a multiple of cache line size.
Definition: tbb_stddef.h:261
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
Definition: arena.h:181
static int unsigned num_arena_slots(unsigned num_slots)
Definition: arena.h:292
static int allocation_size(unsigned num_slots)
Definition: arena.h:296
void advertise_new_work()
If necessary, raise a flag that there is new job in arena.
Definition: arena.h:480
concurrent_monitor my_exit_monitors
Waiting object for master threads that cannot join the arena.
Definition: arena.h:259
unsigned num_workers_active() const
The number of workers active in the arena.
Definition: arena.h:330
void on_thread_leaving()
Notification that worker or master leaves its arena.
Definition: arena.h:390
static void cleanup_worker(void *arg, bool worker)
Perform necessary cleanup when a worker thread finishes.
Definition: scheduler.cpp:1327
mail_outbox
Class representing where mail is put.
Definition: mailbox.h:96
void try_destroy_arena(arena *, uintptr_t aba_epoch)
Removes the arena from the market's list.
Definition: market.cpp:332
void atomic_fence()
Sequentially consistent full memory fence.
Definition: tbb_machine.h:342
task_stream< num_priority_levels > my_task_stream
Task pool for the tasks scheduled via task::enqueue() method.
Definition: arena.h:168
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
Definition: arena.h:301
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
Definition: governor.h:129
spin_mutex
A lock that occupies a single byte.
Definition: spin_mutex.h:39
static bool is_busy_or_empty(pool_state_t s)
No tasks to steal or snapshot is being taken.
Definition: arena.h:327
arena_base
The structure of an arena, except the array of slots.
Definition: arena.h:141
task_stream
The container for "fairness-oriented" aka "enqueued" tasks.
Definition: task_stream.h:69
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
T max(const T &val1, const T &val2)
Utility template function returning greater of the two values.
Definition: tbb_misc.h:124
unsigned short affinity_id
An id as used for specifying affinity.
Definition: task.h:128
bool is_recall_requested() const
Check if the recall is requested by the market.
Definition: arena.h:335
argument_integer_type modulo_power_of_two(argument_integer_type arg, divisor_integer_type divisor)
A function to compute arg modulo divisor where divisor is a power of 2.
Definition: tbb_stddef.h:382
padded< arena_base > base_type
Definition: arena.h:277
unsigned my_num_workers_soft_limit
Current application-imposed limit on the number of workers (see set_active_num_workers()) ...
Definition: market.h:78
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
Definition: governor.cpp:116
spin_mutex::scoped_lock
Represents acquisition of a mutex.
Definition: spin_mutex.h:53
task_group_context
Used to form groups of tasks.
Definition: task.h:347
atomic< unsigned > my_limit
The maximal number of currently busy slots.
Definition: arena.h:157
FastRandom
A fast random number generator.
Definition: tbb_misc.h:140
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
uintptr_t pool_state_t
Definition: arena.h:311
unsigned my_num_workers_allotted
The number of workers that have been marked out by the resource manager to service the arena...
Definition: arena.h:143
market * my_market
The market that owns this arena.
Definition: arena.h:228
int my_num_workers_requested
The number of workers that are currently requested from the resource manager.
Definition: arena.h:184
task
Base class for user-defined tasks.
Definition: task.h:604
static const intptr_t num_priority_levels
generic_scheduler
Work stealing task scheduler.
Definition: scheduler.h:137
#define GATHER_STATISTIC(x)
atomic< unsigned > my_references
Reference counter for the arena.
Definition: arena.h:149

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.
