Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
governor.cpp
Go to the documentation of this file.
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include "governor.h"
20 #include "tbb_main.h"
21 #include "scheduler.h"
22 #include "market.h"
23 #include "arena.h"
24 
26 
27 #include "dynamic_link.h"
28 
29 namespace tbb {
30 namespace internal {
31 
32 //------------------------------------------------------------------------
33 // governor
34 //------------------------------------------------------------------------
35 
36 #if __TBB_SURVIVE_THREAD_SWITCH
37 // Support for interoperability with Intel(R) Cilk(TM) Plus.
38 
39 #if _WIN32
40 #define CILKLIB_NAME "cilkrts20.dll"
41 #else
42 #define CILKLIB_NAME "libcilkrts.so"
43 #endif
44 
46 static __cilk_tbb_retcode (*watch_stack_handler)(struct __cilk_tbb_unwatch_thunk* u,
47  struct __cilk_tbb_stack_op_thunk o);
48 
50 static const dynamic_link_descriptor CilkLinkTable[] = {
51  DLD_NOWEAK(__cilkrts_watch_stack, watch_stack_handler)
52 };
53 
54 static atomic<do_once_state> cilkrts_load_state;
55 
56 bool initialize_cilk_interop() {
57  // Pinning can fail. This is a normal situation, and means that the current
58  // thread does not use cilkrts and consequently does not need interop.
59  return dynamic_link( CILKLIB_NAME, CilkLinkTable, 1, /*handle=*/0, DYNAMIC_LINK_GLOBAL );
60 }
61 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
62 
63 namespace rml {
// Forward declaration of the factory for the built-in ("private") RML
// server, used as the fallback when the shared RML library is unavailable.
64  tbb_server* make_private_server( tbb_client& client );
65 }
66 
// NOTE(review): this span is the tail of governor::acquire_resources(); its
// opening signature line is missing from this extract.
// Creates the TLS key used to track per-thread scheduler instances and caches
// a couple of platform feature flags.
68 #if USE_PTHREAD
// On POSIX, register auto_terminate as the TLS destructor so a thread's
// auto-initialized scheduler is cleaned up when the thread exits.
69  int status = theTLS.create(auto_terminate);
70 #else
71  int status = theTLS.create();
72 #endif
// Failure to create the TLS key is fatal: the scheduler cannot operate
// without per-thread storage.
73  if( status )
74  handle_perror(status, "TBB failed to initialize task scheduler TLS\n");
75  is_speculation_enabled = cpu_has_speculation();
76  is_rethrow_broken = gcc_rethrow_exception_broken();
77 }
78 
// NOTE(review): tail of governor::release_resources(); the signature line and
// original line 81 (per the index, a dynamic_unlink_all() call — confirm) are
// missing from this extract.
80  theRMLServerFactory.close();
82 #if TBB_USE_ASSERT
// Warn if the library is being unloaded while some thread still has a live
// scheduler registered in its TLS slot.
83  if( __TBB_InitOnce::initialization_done() && theTLS.get() )
84  runtime_warning( "TBB is unloaded while tbb::task_scheduler_init object is alive?" );
85 #endif
// Destroying the TLS key is best-effort: failure is reported, not fatal.
86  int status = theTLS.destroy();
87  if( status )
88  runtime_warning("failed to destroy task scheduler TLS: %s", strerror(status));
90 }
91 
92 rml::tbb_server* governor::create_rml_server ( rml::tbb_client& client ) {
93  rml::tbb_server* server = NULL;
94  if( !UsePrivateRML ) {
95  ::rml::factory::status_type status = theRMLServerFactory.make_server( server, client );
96  if( status != ::rml::factory::st_success ) {
97  UsePrivateRML = true;
98  runtime_warning( "rml::tbb_factory::make_server failed with status %x, falling back on private rml", status );
99  }
100  }
101  if ( !server ) {
102  __TBB_ASSERT( UsePrivateRML, NULL );
103  server = rml::make_private_server( client );
104  }
105  __TBB_ASSERT( server, "Failed to create RML server" );
106  return server;
107 }
108 
109 
// NOTE(review): tail of governor::tls_value_of(); signature line missing from
// this extract. Encodes a scheduler pointer for storage in TLS: the pointer
// must be at least 2-byte aligned so its LSB can carry a flag.
111  __TBB_ASSERT( (uintptr_t(s)&1) == 0, "Bad pointer to the scheduler" );
112  // LSB marks the scheduler initialized with arena
113  return uintptr_t(s) | uintptr_t((s && (s->my_arena || s->is_worker()))? 1 : 0);
114 }
115 
// NOTE(review): tail of governor::assume_scheduler(); signature line missing.
// Stores the encoded scheduler pointer into this thread's TLS slot.
117  theTLS.set( tls_value_of(s) );
118 }
119 
// NOTE(review): tail of governor::is_set(); signature line missing.
// True iff the current thread's TLS slot holds exactly this scheduler
// (with the same arena-initialization flag encoding).
121  return theTLS.get() == tls_value_of(s);
122 }
123 
// NOTE(review): tail of governor::sign_on(); the signature line and original
// line 129 (declaration of the stack-op thunk `o` — presumably
// __cilk_tbb_stack_op_thunk, confirm) are missing from this extract.
// Registers the scheduler in TLS and, if the Cilk runtime is linked,
// registers a stack-op callback with cilkrts.
125  __TBB_ASSERT( is_set(NULL) && s, NULL );
126  assume_scheduler( s );
127 #if __TBB_SURVIVE_THREAD_SWITCH
128  if( watch_stack_handler ) {
130  o.routine = &stack_op_handler;
131  o.data = s;
132  if( (*watch_stack_handler)(&s->my_cilk_unwatch_thunk, o) ) {
133  // Failed to register with cilkrts, make sure we are clean
134  s->my_cilk_unwatch_thunk.routine = NULL;
135  }
136 #if TBB_USE_ASSERT
137  else
// Track the Cilk interop state only in debug builds.
138  s->my_cilk_state = generic_scheduler::cs_running;
139 #endif /* TBB_USE_ASSERT */
140  }
141 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
142  __TBB_ASSERT( is_set(s), NULL );
143 }
144 
// NOTE(review): tail of governor::sign_off(); signature line(s) missing.
// Clears this thread's TLS slot and, if a cilkrts unwatch thunk was
// registered by sign_on, invokes it to unregister from the Cilk runtime.
147  __TBB_ASSERT( is_set(s), "attempt to unregister a wrong scheduler instance" );
148  assume_scheduler(NULL);
149 #if __TBB_SURVIVE_THREAD_SWITCH
150  __cilk_tbb_unwatch_thunk &ut = s->my_cilk_unwatch_thunk;
151  if ( ut.routine )
152  (*ut.routine)(ut.data);
153 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
154 }
155 
// NOTE(review): tail of governor::one_time_init(); its opening lines
// (original 156-158, which per the index call DoOneTimeInitializations —
// confirm) are missing from this extract. Lazily links the Cilk interop
// handlers exactly once.
159 #if __TBB_SURVIVE_THREAD_SWITCH
160  atomic_do_once( &initialize_cilk_interop, cilkrts_load_state );
161 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
162 }
163 
// NOTE(review): tail of governor::init_scheduler_weak(); signature line
// missing. Auto-initializes a master scheduler with default settings but
// without an arena; init_scheduler later attaches an arena on demand.
165  one_time_init();
166  __TBB_ASSERT( is_set(NULL), "TLS contains a scheduler?" );
167  generic_scheduler* s = generic_scheduler::create_master( NULL ); // without arena
// Mark as auto-initialized so auto_terminate will reclaim it at thread exit.
168  s->my_auto_initialized = true;
169  return s;
170 }
171 
// Processes a scheduler initialization request (possibly nested) in a master
// thread. If TLS already holds a scheduler, it is reused: a weakly
// (arena-less) initialized one gets an arena attached; otherwise only the
// refcount is bumped for explicit task_scheduler_init instances.
// NOTE(review): original lines 182 and 198 are missing from this extract;
// line 198 presumably declared the local `s` via create_master(a) — confirm.
172 generic_scheduler* governor::init_scheduler( int num_threads, stack_size_type stack_size, bool auto_init ) {
173  one_time_init();
174  if ( uintptr_t v = theTLS.get() ) {
175  generic_scheduler* s = tls_scheduler_of( v );
// LSB clear means the TLS value is a weakly initialized (arena-less) scheduler.
176  if ( (v&1) == 0 ) { // TLS holds scheduler instance without arena
177  __TBB_ASSERT( s->my_ref_count == 1, "weakly initialized scheduler must have refcount equal to 1" );
178  __TBB_ASSERT( !s->my_arena, "weakly initialized scheduler must have no arena" );
179  __TBB_ASSERT( s->my_auto_initialized, "weakly initialized scheduler is supposed to be auto-initialized" );
180  s->attach_arena( market::create_arena( default_num_threads(), 1, 0 ), 0, /*is_master*/true );
181  __TBB_ASSERT( s->my_arena_index == 0, "Master thread must occupy the first slot in its arena" );
183 #if __TBB_TASK_GROUP_CONTEXT
184  s->my_arena->my_default_ctx = s->default_context(); // it also transfers implied ownership
185 #endif
186  // Mark the scheduler as fully initialized
187  assume_scheduler( s );
188  }
189  // Increment refcount only for explicit instances of task_scheduler_init.
190  if ( !auto_init ) s->my_ref_count += 1;
191  __TBB_ASSERT( s->my_arena, "scheduler is not initialized fully" );
192  return s;
193  }
194  // Create new scheduler instance with arena
195  if( num_threads == task_scheduler_init::automatic )
196  num_threads = default_num_threads();
197  arena *a = market::create_arena( num_threads, 1, stack_size );
199  __TBB_ASSERT(s, "Somehow a local scheduler creation for a master thread failed");
200  __TBB_ASSERT( is_set(s), NULL );
201  s->my_auto_initialized = auto_init;
202  return s;
203 }
204 
// NOTE(review): tail of governor::terminate_scheduler(); signature line
// missing (takes the scheduler `s` and a `blocking` flag per the call sites
// below). Drops one reference; on the last release runs cleanup_master and
// reports whether (blocking) termination succeeded.
206  bool ok = false;
207  __TBB_ASSERT( is_set(s), "Attempt to terminate non-local scheduler instance" );
208  if (0 == --(s->my_ref_count)) {
209  ok = s->cleanup_master( blocking );
210  __TBB_ASSERT( is_set(NULL), "cleanup_master has not cleared its TLS slot" );
211  }
212  return ok;
213 }
214 
215 void governor::auto_terminate(void* arg){
216  generic_scheduler* s = tls_scheduler_of( uintptr_t(arg) ); // arg is equivalent to theTLS.get()
217  if( s && s->my_auto_initialized ) {
218  if( !--(s->my_ref_count) ) {
219  // If the TLS slot is already cleared by OS or underlying concurrency
220  // runtime, restore its value.
221  if( !is_set(s) )
222  assume_scheduler(s);
223  s->cleanup_master( /*blocking_terminate=*/false );
224  __TBB_ASSERT( is_set(NULL), "cleanup_master has not cleared its TLS slot" );
225  }
226  }
227 }
228 
// NOTE(review): tail of governor::print_version_info(); signature line
// missing. Reports which RML flavor is in use (and its version info when
// shared), plus the Cilk interop library if it was linked.
230  if ( UsePrivateRML )
231  PrintExtraVersionInfo( "RML", "private" );
232  else {
233  PrintExtraVersionInfo( "RML", "shared" );
234  theRMLServerFactory.call_with_server_info( PrintRMLVersionInfo, (void*)"" );
235  }
236 #if __TBB_SURVIVE_THREAD_SWITCH
237  if( watch_stack_handler )
238  PrintExtraVersionInfo( "CILK", CILKLIB_NAME );
239 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
240 }
241 
// NOTE(review): tail of governor::initialize_rml_factory(); signature line
// missing. Opens the shared RML factory; on failure, flags that the private
// RML fallback should be used by create_rml_server.
243  ::rml::factory::status_type res = theRMLServerFactory.open();
244  UsePrivateRML = res != ::rml::factory::st_success;
245 }
246 
247 #if __TBB_SURVIVE_THREAD_SWITCH
// Callback invoked by cilkrts when a stack bound to a TBB scheduler is
// adopted by a thread, orphaned, or released. Keeps the TLS slot (and, in
// debug builds, the my_cilk_state bookkeeping) consistent with what the Cilk
// runtime did. Always returns 0 (success).
248 __cilk_tbb_retcode governor::stack_op_handler( __cilk_tbb_stack_op op, void* data ) {
249  __TBB_ASSERT(data,NULL);
// `data` is the scheduler registered in sign_on via the stack-op thunk.
250  generic_scheduler* s = static_cast<generic_scheduler*>(data);
251 #if TBB_USE_ASSERT
// Debug-only context used by the assertions and warnings below.
252  void* current = local_scheduler_if_initialized();
253 #if _WIN32||_WIN64
254  uintptr_t thread_id = GetCurrentThreadId();
255 #else
256  uintptr_t thread_id = uintptr_t(pthread_self());
257 #endif
258 #endif /* TBB_USE_ASSERT */
259  switch( op ) {
260  case CILK_TBB_STACK_ADOPT: {
// Adoption is valid either from limbo (thread had no scheduler) or as a
// redundant re-adoption by the same thread.
261  __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo ||
262  current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid adoption" );
263 #if TBB_USE_ASSERT
264  if( current==s )
265  runtime_warning( "redundant adoption of %p by thread %p\n", s, (void*)thread_id );
266  s->my_cilk_state = generic_scheduler::cs_running;
267 #endif /* TBB_USE_ASSERT */
// Point this thread's TLS at the adopted scheduler.
268  assume_scheduler( s );
269  break;
270  }
271  case CILK_TBB_STACK_ORPHAN: {
272  __TBB_ASSERT( current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid orphaning" );
273 #if TBB_USE_ASSERT
274  s->my_cilk_state = generic_scheduler::cs_limbo;
275 #endif /* TBB_USE_ASSERT */
// The stack left this thread: clear the TLS slot.
276  assume_scheduler(NULL);
277  break;
278  }
279  case CILK_TBB_STACK_RELEASE: {
280  __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo ||
281  current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid release" );
282 #if TBB_USE_ASSERT
283  s->my_cilk_state = generic_scheduler::cs_freed;
284 #endif /* TBB_USE_ASSERT */
// Drop the unwatch thunk first so sign_off will not call back into cilkrts,
// then undo any automatic initialization of this scheduler.
285  s->my_cilk_unwatch_thunk.routine = NULL;
286  auto_terminate( s );
287  break;
288  }
289  default:
290  __TBB_ASSERT(0, "invalid op");
291  }
292  return 0;
293 }
294 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
295 
296 #if __TBB_NUMA_SUPPORT
297 
298 #if __TBB_WEAK_SYMBOLS_PRESENT
299 #pragma weak initialize_numa_topology
300 #pragma weak subscribe_arena
301 #pragma weak unsubscribe_arena
302 
303 extern "C" {
305  size_t groups_num, int& nodes_count, int*& indexes_list, int*& concurrency_list );
307  tbb::interface7::task_arena* ta, int numa_id, int num_slots );
309 }
310 #endif /* __TBB_WEAK_SYMBOLS_PRESENT */
311 
312 // Handlers for communication with TBBbind
313 #if _WIN32 || _WIN64 || __linux__
314 static void (*initialize_numa_topology_handler)(
315  size_t groups_num, int& nodes_count, int*& indexes_list, int*& concurrency_list ) = NULL;
316 #endif /* _WIN32 || _WIN64 || __linux__ */
317 
318 static tbb::interface6::task_scheduler_observer* (*subscribe_arena_handler)(
319  tbb::interface7::task_arena* ta, int numa_id, int num_slots ) = NULL;
320 
321 static void (*unsubscribe_arena_handler)(
323 
324 #if _WIN32 || _WIN64 || __linux__
325 // Table describing how to link the handlers.
326 static const dynamic_link_descriptor TbbBindLinkTable[] = {
327  DLD(initialize_numa_topology, initialize_numa_topology_handler),
328  DLD(subscribe_arena, subscribe_arena_handler),
329  DLD(unsubscribe_arena, unsubscribe_arena_handler)
330 };
331 
332 #if TBB_USE_DEBUG
333 #define DEBUG_SUFFIX "_debug"
334 #else
335 #define DEBUG_SUFFIX
336 #endif /* TBB_USE_DEBUG */
337 
338 #if _WIN32 || _WIN64
339 #define TBBBIND_NAME "tbbbind" DEBUG_SUFFIX ".dll"
340 #elif __linux__
341 #define TBBBIND_NAME "libtbbbind" DEBUG_SUFFIX __TBB_STRING(.so.TBB_COMPATIBLE_INTERFACE_VERSION)
342 #endif /* __linux__ */
343 #endif /* _WIN32 || _WIN64 || __linux__ */
344 
345 // Stubs that will be used if TBBbind library is unavailable.
// Returns no observer: with the stubs installed, arenas are never bound.
346 static tbb::interface6::task_scheduler_observer* dummy_subscribe_arena (
347  tbb::interface7::task_arena*, int, int ) { return NULL; }
// No-op counterpart; nothing was subscribed, so nothing to undo.
348 static void dummy_unsubscribe_arena( tbb::interface6::task_scheduler_observer* ) {}
349 
350 // Representation of NUMA topology information on the TBB side.
351 // NUMA topology may be initialized by third-party component (e.g. hwloc)
352 // or just filled by default stubs (1 NUMA node with 0 index and
353 // default_num_threads value as default_concurrency).
354 namespace numa_topology {
355 namespace {
// Number of NUMA nodes discovered (0 until initialization runs).
356 int numa_nodes_count = 0;
// Array of node indexes; points either at TBBbind-provided data or at the
// single-node dummy set up by initialization_impl.
357 int* numa_indexes = NULL;
// Per-node default concurrency values, indexed by NUMA node id.
358 int* default_concurrency_list = NULL;
// Guards one-time lazy initialization of the topology data above.
359 static tbb::atomic<do_once_state> numa_topology_init_state;
360 } // internal namespace
361 
362 // Tries to load TBBbind library API, if success, gets NUMA topology information from it,
363 // in another case, fills NUMA topology by stubs.
364 // TODO: Add TBBbind loading status if TBB_VERSION is set.
// Tries to load the TBBbind library API; on success, queries it for the real
// NUMA topology; otherwise installs single-node stubs (1 node, index -1,
// default_num_threads as concurrency) and no-op arena handlers.
// NOTE(review): original line 366 is missing from this extract — confirm
// against the full source what it contained.
365 void initialization_impl() {
367 
368 #if _WIN32 || _WIN64 || __linux__
369  bool load_tbbbind = true;
370 #if _WIN32 && !_WIN64
371  // For 32-bit Windows applications, process affinity masks can only support up to 32 logical CPUs.
372  SYSTEM_INFO si;
373  GetNativeSystemInfo(&si);
374  load_tbbbind = si.dwNumberOfProcessors <= 32;
375 #endif /* _WIN32 && !_WIN64 */
376 
377  if (load_tbbbind && dynamic_link(TBBBIND_NAME, TbbBindLinkTable, 3)) {
378  int number_of_groups = 1;
379 #if _WIN32 || _WIN64
380  number_of_groups = NumberOfProcessorGroups();
381 #endif /* _WIN32 || _WIN64 */
// Let TBBbind fill the topology arrays in place.
382  initialize_numa_topology_handler(
383  number_of_groups, numa_nodes_count, numa_indexes, default_concurrency_list);
384 
385  if (numa_nodes_count==1 && numa_indexes[0] >= 0) {
386  __TBB_ASSERT(default_concurrency_list[numa_indexes[0]] == (int)governor::default_num_threads(),
387  "default_concurrency() should be equal to governor::default_num_threads() on single"
388  "NUMA node systems.");
389  }
390  return;
391  }
392 #endif /* _WIN32 || _WIN64 || __linux__ */
393 
// Fallback path: TBBbind unavailable (or unusable). Use static dummies so
// the pointers above remain valid for the lifetime of the process.
394  static int dummy_index = -1;
395  static int dummy_concurrency = governor::default_num_threads();
396 
397  numa_nodes_count = 1;
398  numa_indexes = &dummy_index;
399  default_concurrency_list = &dummy_concurrency;
400 
401  subscribe_arena_handler = dummy_subscribe_arena;
402  unsubscribe_arena_handler = dummy_unsubscribe_arena;
403 }
404 
// Thread-safe, lazy, one-time initialization of the NUMA topology data.
405 void initialize() {
406  atomic_do_once(initialization_impl, numa_topology_init_state);
407 }
408 
// Returns the number of NUMA nodes, initializing the topology on first use.
// (numa_nodes_count is an int; it is implicitly converted to unsigned here.)
409 unsigned nodes_count() {
410  initialize();
411  return numa_nodes_count;
412 }
413 
414 void fill( int* indexes_array ) {
415  initialize();
416  for ( int i = 0; i < numa_nodes_count; i++ ) {
417  indexes_array[i] = numa_indexes[i];
418  }
419 }
420 
// Returns the default concurrency for the given NUMA node id.
// NOTE(review): original line 426 (the return statement for node_id < 0 —
// presumably governor::default_num_threads(), confirm) is missing from this
// extract; as shown, the negative-id path appears to fall off the end.
421 int default_concurrency( int node_id ) {
422  if (node_id >= 0) {
423  initialize();
424  return default_concurrency_list[node_id];
425  }
427 }
428 
429 } // namespace numa_topology
430 
// NOTE(review): tail of the binding-observer factory; its first signature
// line (original 431, carrying the function name and the `ta` parameter) is
// missing from this extract. Subscribes the arena only when binding makes
// sense: a valid NUMA id on a machine with more than one node.
432  int numa_id, int num_slots ) {
433  // numa_topology initialization will be lazily performed inside nodes_count() call
434  return (numa_id >= 0 && numa_topology::nodes_count() > 1) ?
435  subscribe_arena_handler(ta, numa_id, num_slots) : NULL;
436 }
437 
// Unsubscribes a previously created binding observer via the linked (or
// stubbed) TBBbind handler. The observer must be non-NULL.
438 void destroy_binding_observer( tbb::interface6::task_scheduler_observer* observer ) {
439  __TBB_ASSERT(observer != NULL, "Trying to access observer via NULL pointer");
440  unsubscribe_arena_handler(observer);
441 }
442 #endif /* __TBB_NUMA_SUPPORT */
443 
444 } // namespace internal
445 
446 //------------------------------------------------------------------------
447 // task_scheduler_init
448 //------------------------------------------------------------------------
449 
450 using namespace internal;
451 
// Convenience overload: delegates to the two-argument form with the default
// (0) stack size, i.e. no propagation-mode bits and the implementation's
// default worker stack size.
453 void task_scheduler_init::initialize( int number_of_threads ) {
454  initialize( number_of_threads, 0 );
455 }
456 
// Ensures a scheduler exists for this thread. The low bits of
// thread_stack_size smuggle exception-propagation-mode flags (see
// propagation_mode_mask); they are extracted here and stripped before the
// value is used as an actual stack size.
457 void task_scheduler_init::initialize( int number_of_threads, stack_size_type thread_stack_size ) {
458 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
459  uintptr_t new_mode = thread_stack_size & propagation_mode_mask;
460 #endif
// Strip the mode bits unconditionally so the stack size is clean either way.
461  thread_stack_size &= ~(stack_size_type)propagation_mode_mask;
462  if( number_of_threads!=deferred ) {
463  __TBB_ASSERT_RELEASE( !my_scheduler, "task_scheduler_init already initialized" );
464  __TBB_ASSERT_RELEASE( number_of_threads==automatic || number_of_threads > 0,
465  "number_of_threads for task_scheduler_init must be automatic or positive" );
466  internal::generic_scheduler *s = governor::init_scheduler( number_of_threads, thread_stack_size, /*auto_init=*/false );
467 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
// On the outermost level, apply the requested propagation mode to the
// default context and remember the previous mode so terminate can restore it.
468  if ( s->master_outermost_level() ) {
469  uintptr_t &vt = s->default_context()->my_version_and_traits;
470  uintptr_t prev_mode = vt & task_group_context::exact_exception ? propagation_mode_exact : 0;
471  vt = new_mode & propagation_mode_exact ? vt | task_group_context::exact_exception
472  : new_mode & propagation_mode_captured ? vt & ~task_group_context::exact_exception : vt;
473  // Use least significant bit of the scheduler pointer to store previous mode.
474  // This is necessary when components compiled with different compilers and/or
475  // TBB versions initialize the
476  my_scheduler = static_cast<scheduler*>((generic_scheduler*)((uintptr_t)s | prev_mode));
477  }
478  else
479 #endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
480  my_scheduler = s;
481  } else {
// Deferred initialization: only record the request; a stack size here would
// be silently lost, hence the release assert.
482  __TBB_ASSERT_RELEASE( !thread_stack_size, "deferred initialization ignores stack size setting" );
483  }
484 }
485 
// NOTE(review): tail of task_scheduler_init::internal_terminate(bool
// blocking); the signature line is missing from this extract. Undoes
// initialize(): recovers the propagation-mode bit hidden in the LSB of
// my_scheduler, restores the previous exception-propagation mode on the
// default context, and releases the scheduler reference.
487 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
488  uintptr_t prev_mode = (uintptr_t)my_scheduler & propagation_mode_exact;
489  my_scheduler = (scheduler*)((uintptr_t)my_scheduler & ~(uintptr_t)propagation_mode_exact);
490 #endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
491  generic_scheduler* s = static_cast<generic_scheduler*>(my_scheduler);
// Clear the member before terminating so the object reads as uninitialized.
492  my_scheduler = NULL;
493  __TBB_ASSERT_RELEASE( s, "task_scheduler_init::terminate without corresponding task_scheduler_init::initialize()");
494 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
495  if ( s->master_outermost_level() ) {
496  uintptr_t &vt = s->default_context()->my_version_and_traits;
497  vt = prev_mode & propagation_mode_exact ? vt | task_group_context::exact_exception
498  : vt & ~task_group_context::exact_exception;
499  }
500 #endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
501  return governor::terminate_scheduler(s, blocking);
502 }
503 
// NOTE(review): tail of task_scheduler_init::terminate(); signature line
// missing. Non-blocking termination: does not wait for workers to join.
505  internal_terminate(/*blocking_terminate=*/false);
506 }
507 
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
// Blocking termination: waits for worker threads to join. Returns whether
// that succeeded. When exceptions are enabled and `throwing` is set, failure
// is reported by throwing eid_blocking_thread_join_impossible instead of
// leaving it to the caller to inspect the result.
bool task_scheduler_init::internal_blocking_terminate( bool throwing ) {
    const bool joined = internal_terminate( /*blocking_terminate=*/true );
#if TBB_USE_EXCEPTIONS
    if( !joined && throwing )
        throw_exception( eid_blocking_thread_join_impossible );
#else
    suppress_unused_warning( throwing );
#endif
    return joined;
}
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
520 
523 }
524 
525 } // namespace tbb
__cilk_tbb_pfn_stack_op routine
__cilk_tbb_stack_op
static bool initialization_done()
Definition: tbb_main.h:64
void __TBB_EXPORTED_FUNC handle_perror(int error_code, const char *aux_info)
Throws std::runtime_error with what() returning error_code description prefixed with aux_info...
Definition: tbb_misc.cpp:87
bool master_outermost_level() const
True if the scheduler is on the outermost dispatch level in a master thread.
Definition: scheduler.h:653
static rml::tbb_server * create_rml_server(rml::tbb_client &)
Definition: governor.cpp:92
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void * data
static void auto_terminate(void *scheduler)
The internal routine to undo automatic initialization.
Definition: governor.cpp:215
#define __TBB_ASSERT_RELEASE(predicate, message)
Definition: tbb_stddef.h:134
static void release_resources()
Destroy the thread-local storage key and deinitialize RML.
Definition: governor.cpp:79
static generic_scheduler * create_master(arena *a)
Initialize a scheduler for a master thread.
Definition: scheduler.cpp:1283
static generic_scheduler * init_scheduler(int num_threads, stack_size_type stack_size, bool auto_init)
Processes scheduler initialization request (possibly nested) in a master thread.
Definition: governor.cpp:172
task_scheduler_observer * subscribe_arena(task_arena *ta, int numa_id, int num_slots)
Definition: tbbbind.cpp:291
The graph class.
__cilk_tbb_pfn_unwatch_stacks routine
static bool terminate_scheduler(generic_scheduler *s, bool blocking)
Processes scheduler termination request (possibly nested) in a master thread.
Definition: governor.cpp:205
const int DYNAMIC_LINK_GLOBAL
Definition: dynamic_link.h:77
bool is_worker() const
True if running on a worker thread, false otherwise.
Definition: scheduler.h:673
int __cilk_tbb_retcode
bool my_auto_initialized
True if *this was created by automatic TBB initialization.
Definition: scheduler.h:197
static unsigned default_num_threads()
Definition: governor.h:84
static const int automatic
Typedef for number of threads that is automatic.
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
Definition: governor.cpp:120
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
Definition: scheduler.h:85
bool gcc_rethrow_exception_broken()
Definition: tbb_misc.cpp:198
void dynamic_unlink_all()
void DoOneTimeInitializations()
Performs thread-safe lazy one-time general TBB initialization.
Definition: tbb_main.cpp:215
static generic_scheduler * init_scheduler_weak()
Automatic initialization of scheduler in a master thread with default settings without arena...
Definition: governor.cpp:164
void PrintExtraVersionInfo(const char *category, const char *format,...)
Prints arbitrary extra TBB version information on stderr.
Definition: tbb_misc.cpp:211
void const char const char int ITT_FORMAT __itt_group_sync s
static void initialize_rml_factory()
Definition: governor.cpp:242
void attach_arena(arena *, size_t index, bool is_master)
Definition: arena.cpp:36
void __TBB_EXPORTED_FUNC runtime_warning(const char *format,...)
Report a runtime warning.
void throw_exception(exception_id eid)
Versionless convenience wrapper for throw_exception_v4()
void unsubscribe_arena(task_scheduler_observer *binding_observer)
Definition: tbbbind.cpp:298
void PrintRMLVersionInfo(void *arg, const char *server_info)
A callback routine to print RML version information on stderr.
Definition: tbb_misc.cpp:222
static void print_version_info()
Definition: governor.cpp:229
#define DLD_NOWEAK(s, h)
Definition: dynamic_link.h:57
void initialize_numa_topology(size_t groups_num, int &nodes_count, int *&indexes_list, int *&concurrency_list)
Definition: tbbbind.cpp:285
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
Definition: scheduler.h:79
OPEN_INTERNAL_NAMESPACE bool dynamic_link(const char *, const dynamic_link_descriptor *, size_t, dynamic_link_handle *handle, int)
static void sign_off(generic_scheduler *s)
Unregister TBB scheduler instance from thread-local storage.
Definition: governor.cpp:145
long my_ref_count
Reference count for scheduler.
Definition: scheduler.h:190
static int __TBB_EXPORTED_FUNC default_num_threads()
Returns the number of threads TBB scheduler would create if initialized by default.
Definition: governor.cpp:521
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
Definition: governor.cpp:116
static uintptr_t tls_value_of(generic_scheduler *s)
Computes the value of the TLS.
Definition: governor.cpp:110
bool internal_terminate(bool blocking)
Definition: governor.cpp:486
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
Definition: tbb_stddef.h:398
static void one_time_init()
Definition: governor.cpp:156
Used to form groups of tasks.
Definition: task.h:347
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
void atomic_do_once(const F &initializer, atomic< do_once_state > &state)
One-time initialization function.
Definition: tbb_misc.h:218
void __TBB_EXPORTED_METHOD terminate()
Inverse of method initialize.
Definition: governor.cpp:504
#define DLD(s, h)
The helper to construct dynamic_link_descriptor structure.
Definition: dynamic_link.h:56
CILK_EXPORT __cilk_tbb_retcode __cilkrts_watch_stack(struct __cilk_tbb_unwatch_thunk *u, struct __cilk_tbb_stack_op_thunk o)
static void acquire_resources()
Create key for thread-local storage and initialize RML.
Definition: governor.cpp:67
std::size_t stack_size_type
bool cleanup_master(bool blocking_terminate)
Perform necessary cleanup when a master thread stops using TBB.
Definition: scheduler.cpp:1337
static arena * create_arena(int num_slots, int num_reserved_slots, size_t stack_size)
Creates an arena object.
Definition: market.cpp:307
Work stealing task scheduler.
Definition: scheduler.h:137
Association between a handler name and location of pointer to it.
Definition: dynamic_link.h:60
static void sign_on(generic_scheduler *s)
Register TBB scheduler instance in thread-local storage.
Definition: governor.cpp:124
void destroy_process_mask()
Definition: tbb_misc.h:271
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
tbb_server * make_private_server(tbb_client &client)
Factory method called from task.cpp to create a private_server.
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
Definition: scheduler.h:82
void __TBB_EXPORTED_METHOD initialize(int number_of_threads=automatic)
Ensure that scheduler exists for this thread.
Definition: governor.cpp:453
bool cpu_has_speculation()
check for transaction support.
Definition: tbb_misc.cpp:230

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.