#ifndef _TBB_scheduler_common_H
#define _TBB_scheduler_common_H

#include "tbb/tbb_stddef.h"
#include "tbb/tbb_machine.h"
#include "tbb/cache_aligned_allocator.h"
#include <string.h>   // for memcmp

#include "tbb_statistics.h"

#if TBB_USE_ASSERT > 1
#include <stdio.h>
#endif /* TBB_USE_ASSERT > 1 */

// Temporarily change "private" to "public" while including "tbb/task.h".
// This hack lets scheduler internals access private members of class task
// without publishing them through friend declarations in the public headers.
#define private public
#include "tbb/task.h"
#include "tbb/tbb_exception.h"
#undef private

#ifndef __TBB_SCHEDULER_MUTEX_TYPE
#define __TBB_SCHEDULER_MUTEX_TYPE tbb::spin_mutex
#endif
#include "tbb/spin_mutex.h"

// The following macros hide feature-test ifdefs in the shared parts of the code:
// each one drops its trailing argument when the corresponding feature is disabled.
#if __TBB_TASK_GROUP_CONTEXT
#define __TBB_CONTEXT_ARG1(context) context
#define __TBB_CONTEXT_ARG(arg1, context) arg1, context
#else /* !__TBB_TASK_GROUP_CONTEXT */
#define __TBB_CONTEXT_ARG1(context)
#define __TBB_CONTEXT_ARG(arg1, context) arg1
#endif /* !__TBB_TASK_GROUP_CONTEXT */

#if __TBB_TASK_ISOLATION
#define __TBB_ISOLATION_EXPR(isolation) isolation
#define __TBB_ISOLATION_ARG(arg1, isolation) arg1, isolation
#else /* !__TBB_TASK_ISOLATION */
#define __TBB_ISOLATION_EXPR(isolation)
#define __TBB_ISOLATION_ARG(arg1, isolation) arg1
#endif /* !__TBB_TASK_ISOLATION */

#if DO_TBB_TRACE
#include <cstdio>
#define TBB_TRACE(x) ((void)std::printf x)
#else
#define TBB_TRACE(x) ((void)(0))
#endif /* DO_TBB_TRACE */

#if !__TBB_CPU_CTL_ENV_PRESENT
#include <fenv.h>
#endif

#if _MSC_VER && !defined(__INTEL_COMPILER)
// These particular warnings are so ubiquitous that no attempt is made to narrow
// the scope of their suppression.
#pragma warning (disable: 4100 4127 4312 4244 4267 4706)
#endif
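// Usage sketch for the argument-dropping macros above (hypothetical signature,
// for illustration only):
//
//     task& allocate_task( __TBB_CONTEXT_ARG( size_t number_of_bytes, task_group_context* context ) );
//
// With __TBB_TASK_GROUP_CONTEXT enabled this expands to
//     task& allocate_task( size_t number_of_bytes, task_group_context* context );
// and with the feature disabled to
//     task& allocate_task( size_t number_of_bytes );
// so shared code avoids #if blocks inside every parameter list and call site.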
namespace tbb {
namespace interface7 {
namespace internal {
class task_arena_base;
} // namespace internal
} // namespace interface7

namespace internal {
using namespace interface7::internal;
template<typename SchedulerTraits> class custom_scheduler;
class generic_scheduler;
class observer_proxy;
class task_scheduler_observer_v3;
#if __TBB_TASK_PRIORITY
static const intptr_t num_priority_levels = 3;
static const intptr_t normalized_normal_priority = (num_priority_levels - 1) / 2;

inline intptr_t normalize_priority ( priority_t p ) {
    return intptr_t(p ? p / priority_stride_v4 - 1 : normalized_normal_priority);
}

inline void assert_priority_valid ( intptr_t p ) {
    __TBB_ASSERT_EX( p >= 0 && p < num_priority_levels, NULL );
}

inline intptr_t& priority ( task& t ) {
    return t.prefix().context->my_priority;
}
#else /* !__TBB_TASK_PRIORITY */
static const intptr_t num_priority_levels = 1;
#endif /* !__TBB_TASK_PRIORITY */
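// Worked example of the normalization above: priority_low, priority_normal and
// priority_high are consecutive multiples of priority_stride_v4 (see tbb/task.h),
// so they normalize to 0, 1 and 2 respectively, while a zero (unset) priority maps
// to normalized_normal_priority == (3 - 1) / 2 == 1.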
#if __TBB_TASK_GROUP_CONTEXT
//! Global counter incremented whenever the state of some context (e.g. cancellation)
//! is propagated, signaling schedulers to refresh their local snapshots.
extern uintptr_t the_context_state_propagation_epoch;

//! Mutex type for global locks in the scheduler
typedef __TBB_SCHEDULER_MUTEX_TYPE scheduler_mutex_type;

//! Mutex guarding state change propagation across contexts registered with the scheduler.
typedef scheduler_mutex_type context_state_propagation_mutex_type;
extern context_state_propagation_mutex_type the_context_state_propagation_mutex;
#endif /* __TBB_TASK_GROUP_CONTEXT */
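// State propagation sketch: a scheduler keeps a local snapshot of the epoch and
// compares it against the global counter; a mismatch means some context changed
// state and local data must be refreshed. A minimal, hypothetical check
// (local_epoch is an illustrative member name, not part of this header):
//
//     if ( local_epoch != the_context_state_propagation_epoch ) {
//         context_state_propagation_mutex_type::scoped_lock lock( the_context_state_propagation_mutex );
//         // ... walk registered contexts, pick up new state, update local_epoch ...
//     }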
//! Definitions for bits in task_prefix::extra_state
enum task_extra_state {
    //! Tag for v1 tasks (i.e. tasks in TBB 1.0 and 2.0)
    es_version_1_task = 0,
    //! Tag for v3 tasks (i.e. tasks in TBB 2.1-2.2)
    es_version_3_task = 1,
#if __TBB_PREVIEW_CRITICAL_TASKS
    //! Tag for critical tasks
    es_task_critical = 0x8,
#endif /* __TBB_PREVIEW_CRITICAL_TASKS */
    //! Tag for enqueued tasks
    es_task_enqueued = 0x10,
    //! Tag for v3 task_proxy
    es_task_proxy = 0x20,
    //! Set if ref_count might be changed by another thread. Used for debugging.
    es_ref_count_active = 0x40,
    //! Set if the task has been stolen
    es_task_is_stolen = 0x80
};

//! Optimization hint to free_task that enables it omit unnecessary tests and code.
enum free_task_hint {
    //! No hint
    no_hint = 0,
    //! Task is known to have been allocated by this scheduler
    local_task = 1,
    //! Task is known to be a small task.
    small_task = 2,
    //! Bitwise-OR of local_task and small_task.
    small_local_task = 3,
    //! Disable caching for a small task.
    no_cache = 4,
    //! Task is known to be a small task and must not be cached.
    no_cache_small_task = no_cache | small_task
};
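// The extra_state bits above are queried by plain masking; for instance, a hedged
// sketch of a stolen-task check:
//
//     bool stolen = ( t.prefix().extra_state & es_task_is_stolen ) != 0;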
//------------------------------------------------------------------------
// Debugging support
//------------------------------------------------------------------------

#if TBB_USE_ASSERT

//! Canary value used to detect use of freed or corrupted memory.
static const uintptr_t venom =
    tbb::internal::select_size_t_constant<0xDEADBEEFU, 0xDDEEAADDDEADBEEFULL>::value;

template <typename T>
void poison_value ( T& val ) { val = * punned_cast<T*>(&venom); }

//! Expected to be used in assertions only, thus no empty form is defined.
inline bool is_alive( uintptr_t v ) { return v != venom; }

/** Logically, this method should be a member of class task.
    But we do not want to publish it, so it is here instead. **/
inline void assert_task_valid( const task* task ) {
    __TBB_ASSERT( task != NULL, NULL );
    __TBB_ASSERT( (uintptr_t)task % task_alignment == 0, "misaligned task" );
#if __TBB_RECYCLE_TO_ENQUEUE
    __TBB_ASSERT( (unsigned)task->state() <= (unsigned)task::to_enqueue, "corrupt task (invalid state)" );
#else
    __TBB_ASSERT( (unsigned)task->state() <= (unsigned)task::recycle, "corrupt task (invalid state)" );
#endif
}
#else /* !TBB_USE_ASSERT */

/** In contrast to the debug version, poison_value() is a macro here because
    the variable used as its argument may be undefined in release builds. **/
#define poison_value(g) ((void)0)

inline void assert_task_valid( const task* ) {}

#endif /* !TBB_USE_ASSERT */

//------------------------------------------------------------------------
// Helpers
//------------------------------------------------------------------------

#if __TBB_TASK_GROUP_CONTEXT

inline bool ConcurrentWaitsEnabled ( task& t ) {
    return (t.prefix().context->my_version_and_traits & task_group_context::concurrent_wait) != 0;
}

inline bool CancellationInfoPresent ( task& t ) {
    return t.prefix().context->my_cancellation_requested != 0;
}

#if TBB_USE_CAPTURED_EXCEPTION
    inline tbb_exception* TbbCurrentException( task_group_context*, tbb_exception* src ) { return src->move(); }
    inline tbb_exception* TbbCurrentException( task_group_context* c, captured_exception* src ) {
        if ( c->my_version_and_traits & task_group_context::exact_exception )
            runtime_warning( "Exact exception propagation is requested by application but the linked library is built without support for it" );
        return src;
    }
    #define TbbRethrowException(TbbCapturedException) (TbbCapturedException)->throw_self()
#else /* !TBB_USE_CAPTURED_EXCEPTION */
    // Using macros instead of inline functions here allows to avoid evaluation of the
    // TbbCapturedException expression when exact propagation is enabled for the context.
    #define TbbCurrentException(context, TbbCapturedException) \
        context->my_version_and_traits & task_group_context::exact_exception \
            ? tbb_exception_ptr::allocate() \
            : tbb_exception_ptr::allocate( *(TbbCapturedException) );
    #define TbbRethrowException(TbbCapturedException) \
        { \
            if( governor::rethrow_exception_broken() ) fix_broken_rethrow(); \
            (TbbCapturedException)->throw_self(); \
        }
#endif /* !TBB_USE_CAPTURED_EXCEPTION */

#define TbbRegisterCurrentException(context, TbbCapturedException) \
    if ( context->cancel_group_execution() ) { \
        /* We are the first to signal cancellation, so store the exception that caused it. */ \
        context->my_exception = TbbCurrentException( context, TbbCapturedException ); \
    }

#define TbbCatchAll(context) \
    catch ( tbb_exception& exc ) { \
        TbbRegisterCurrentException( context, &exc ); \
    } catch ( std::exception& exc ) { \
        TbbRegisterCurrentException( context, captured_exception::allocate(typeid(exc).name(), exc.what()) ); \
    } catch ( ... ) { \
        TbbRegisterCurrentException( context, captured_exception::allocate("...", "Unidentified exception") ); \
    }

#endif /* __TBB_TASK_GROUP_CONTEXT */

//------------------------------------------------------------------------
// Spinning and backoff
//------------------------------------------------------------------------

#if defined(__TBB_time_stamp) && !__TBB_STEALING_PAUSE
//! Pause for approximately 1000 time stamp counter ticks, backing off exponentially.
inline void prolonged_pause() {
    machine_tsc_t prev = __TBB_time_stamp();
    const machine_tsc_t finish = prev + 1000;
    atomic_backoff backoff;
    do {
        backoff.bounded_pause();
        machine_tsc_t curr = __TBB_time_stamp();
        if ( curr <= prev )
            // Possibly, the current thread was migrated to another hardware thread
            // or the time stamp counter overflowed.
            break;
        prev = curr;
    } while ( prev < finish );
}
#else /* !(defined(__TBB_time_stamp) && !__TBB_STEALING_PAUSE) */
#ifdef __TBB_STEALING_PAUSE
static const long PauseTime = __TBB_STEALING_PAUSE;
#elif __TBB_ipf
static const long PauseTime = 1500;
#else
static const long PauseTime = 80;
#endif
inline void prolonged_pause() {
    __TBB_Pause( PauseTime );
}
#endif /* !(defined(__TBB_time_stamp) && !__TBB_STEALING_PAUSE) */
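// Usage sketch for the TbbCatchAll helper defined above (hypothetical dispatch loop,
// for illustration only):
//
//     try {
//         t->execute();
//     } TbbCatchAll( t->prefix().context )
//
// The macro supplies the catch clauses: a tbb_exception is registered as-is, a
// std::exception is wrapped into a captured_exception, and anything else is recorded
// as an "Unidentified exception". In all cases only the thread that first cancels the
// group stores the exception into the context.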
//------------------------------------------------------------------------
// arena_slot
//------------------------------------------------------------------------

//! The two halves of the slot are padded to separate cache lines to avoid false
//! sharing between the owning thread (tail side) and thieves (head side).
struct arena_slot_line1 {
    //! Scheduler of the thread attached to the slot
    generic_scheduler* my_scheduler;

    //! Task pool (the deque of task pointers) of the scheduler that owns this slot
    task** __TBB_atomic task_pool;

    //! Index of the first ready task in the deque.
    __TBB_atomic size_t head;
#if __TBB_PREVIEW_RESUMABLE_TASKS
    //! Whether the scheduler that owns this slot is recalled (resumable tasks support).
    tbb::atomic<bool>* my_scheduler_is_recalled;
#endif
};

struct arena_slot_line2 {
#if __TBB_PREVIEW_CRITICAL_TASKS
    //! Hint provided for operations with the container of critical tasks.
    unsigned hint_for_critical;
#endif
    //! Hint provided for operations with the container of starvation-resistant tasks.
    unsigned hint_for_pop;

    //! Index of the element following the last ready task in the deque.
    __TBB_atomic size_t tail;

    //! Capacity of the primary task pool (number of elements - pointers to task).
    size_t my_task_pool_size;

    //! Task pool of the scheduler that owns this slot.
    task** __TBB_atomic task_pool_ptr;

#if __TBB_STATISTICS
    //! Set of counters to accumulate internal statistics related to this arena.
    statistics_counters *my_counters;
#endif /* __TBB_STATISTICS */
};

struct arena_slot : padded<arena_slot_line1>, padded<arena_slot_line2> {
#if TBB_USE_ASSERT
    void fill_with_canary_pattern ( size_t first, size_t last ) {
        for ( size_t i = first; i < last; ++i )
            poison_pointer( task_pool_ptr[i] );
    }
#else
    void fill_with_canary_pattern ( size_t, size_t ) {}
#endif /* TBB_USE_ASSERT */

    void allocate_task_pool( size_t n ) {
        size_t byte_size = ((n * sizeof(task*) + NFS_MaxLineSize - 1) / NFS_MaxLineSize) * NFS_MaxLineSize;
        my_task_pool_size = byte_size / sizeof(task*);
        task_pool_ptr = (task**)NFS_Allocate( 1, byte_size, NULL );
        // No need to clear the fresh deque since valid items are designated by the head
        // and tail members. But fill it with a canary pattern in the high vigilance debug mode.
        fill_with_canary_pattern( 0, my_task_pool_size );
    }

    //! Deallocate task pool that was allocated by means of allocate_task_pool.
    void free_task_pool() {
        if ( task_pool_ptr ) {
            __TBB_ASSERT( my_task_pool_size, NULL );
            NFS_Free( task_pool_ptr );
            task_pool_ptr = NULL;
            my_task_pool_size = 0;
        }
    }
};
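// Worked example of the pool sizing arithmetic in allocate_task_pool: assuming
// 8-byte task pointers and NFS_MaxLineSize == 128, a request for n == 5 rounds
// 40 bytes up to byte_size == 128, so my_task_pool_size == 16. The pool thus
// always occupies a whole number of cache lines, which avoids false sharing
// with adjacent allocations.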
#if !__TBB_CPU_CTL_ENV_PRESENT
//! Portable fallback that captures and restores the FPU control state via <fenv.h>.
class cpu_ctl_env {
    fenv_t *my_fenv_ptr;
public:
    cpu_ctl_env() : my_fenv_ptr(NULL) {}
    ~cpu_ctl_env() {
        if ( my_fenv_ptr )
            tbb::internal::NFS_Free( (void*)my_fenv_ptr );
    }
    cpu_ctl_env( const cpu_ctl_env &src ) : my_fenv_ptr(NULL) {
        *this = src;
    }
    cpu_ctl_env& operator=( const cpu_ctl_env &src ) {
        __TBB_ASSERT( src.my_fenv_ptr, NULL );
        if ( !my_fenv_ptr )
            my_fenv_ptr = (fenv_t*)tbb::internal::NFS_Allocate( 1, sizeof(fenv_t), NULL );
        *my_fenv_ptr = *src.my_fenv_ptr;
        return *this;
    }
    bool operator!=( const cpu_ctl_env &ctl ) const {
        __TBB_ASSERT( my_fenv_ptr, "cpu_ctl_env is not initialized." );
        __TBB_ASSERT( ctl.my_fenv_ptr, "cpu_ctl_env is not initialized." );
        return memcmp( (void*)my_fenv_ptr, (void*)ctl.my_fenv_ptr, sizeof(fenv_t) ) != 0;
    }
    void get_env() {
        if ( !my_fenv_ptr )
            my_fenv_ptr = (fenv_t*)tbb::internal::NFS_Allocate( 1, sizeof(fenv_t), NULL );
        fegetenv( my_fenv_ptr );
    }
    const cpu_ctl_env& set_env() const {
        __TBB_ASSERT( my_fenv_ptr, "cpu_ctl_env is not initialized." );
        fesetenv( my_fenv_ptr );
        return *this;
    }
};
#endif /* !__TBB_CPU_CTL_ENV_PRESENT */
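// Usage sketch (illustrative, not part of this header): the scheduler snapshots the
// master thread's floating-point settings and re-applies them on workers so tasks run
// with consistent rounding and exception masks:
//
//     cpu_ctl_env fpu_state;
//     fpu_state.get_env();   // on the master: capture via fegetenv()
//     // ...
//     fpu_state.set_env();   // on a worker: restore via fesetenv() before executing tasks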
} // namespace internal
} // namespace tbb

#endif /* _TBB_scheduler_common_H */