Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
task.h
Go to the documentation of this file.
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_task_H
18 #define __TBB_task_H
19 
20 #define __TBB_task_H_include_area
22 
23 #include "tbb_stddef.h"
24 #include "tbb_machine.h"
25 #include "tbb_profiling.h"
26 #include <climits>
27 
28 typedef struct ___itt_caller *__itt_caller;
29 
30 namespace tbb {
31 
32 class task;
33 class task_list;
34 class task_group_context;
35 
36 // MSVC does not allow taking the address of a member that was defined
37 // privately in task_base and made public in class task via a using declaration.
38 #if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
39 #define __TBB_TASK_BASE_ACCESS public
40 #else
41 #define __TBB_TASK_BASE_ACCESS private
42 #endif
43 
44 namespace internal { //< @cond INTERNAL
45 
48  task* self;
50  public:
51  explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {
53  }
54  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
55  void __TBB_EXPORTED_METHOD free( task& ) const;
56  };
57 
58  struct cpu_ctl_env_space { int space[sizeof(internal::uint64_t)/sizeof(int)]; };
59 } //< namespace internal @endcond
60 
61 namespace interface5 {
62  namespace internal {
64 
71  friend class tbb::task;
72 
74  static void spawn( task& t );
75 
77  static void spawn( task_list& list );
78 
80 
84  }
85 
87 
91  static void __TBB_EXPORTED_FUNC destroy( task& victim );
92  };
93  } // internal
94 } // interface5
95 
97 namespace internal {
98 
99  class scheduler: no_copy {
100  public:
102  virtual void spawn( task& first, task*& next ) = 0;
103 
105  virtual void wait_for_all( task& parent, task* child ) = 0;
106 
108  virtual void spawn_root_and_wait( task& first, task*& next ) = 0;
109 
111  // Have to have it just to shut up overzealous compilation warnings
112  virtual ~scheduler() = 0;
113 
115  virtual void enqueue( task& t, void* reserved ) = 0;
116  };
117 
119 
120  typedef intptr_t reference_count;
121 
122 #if __TBB_PREVIEW_RESUMABLE_TASKS
123  static const reference_count abandon_flag = reference_count(1) << (sizeof(reference_count)*CHAR_BIT - 2);
125 #endif
126 
128  typedef unsigned short affinity_id;
129 
130 #if __TBB_TASK_ISOLATION
131  typedef intptr_t isolation_tag;
133  const isolation_tag no_isolation = 0;
134 #endif /* __TBB_TASK_ISOLATION */
135 
136 #if __TBB_TASK_GROUP_CONTEXT
137  class generic_scheduler;
138 
141  *my_next;
142  };
143 
146  public:
148  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
149  void __TBB_EXPORTED_METHOD free( task& ) const;
150  };
151 #endif /* __TBB_TASK_GROUP_CONTEXT */
152 
154  public:
155  static task& __TBB_EXPORTED_FUNC allocate( size_t size );
156  static void __TBB_EXPORTED_FUNC free( task& );
157  };
158 
160  public:
161  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
162  void __TBB_EXPORTED_METHOD free( task& ) const;
163  };
164 
166  public:
167  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
168  void __TBB_EXPORTED_METHOD free( task& ) const;
169  };
170 
171 #if __TBB_PREVIEW_CRITICAL_TASKS
172  // TODO: move to class methods when critical task API becomes public
173  void make_critical( task& t );
174  bool is_critical( task& t );
175 #endif
176 
178 
192  class task_prefix {
193  private:
194  friend class tbb::task;
196  friend class tbb::task_list;
197  friend class internal::scheduler;
202 #if __TBB_PREVIEW_CRITICAL_TASKS
203  friend void make_critical( task& );
204  friend bool is_critical( task& );
205 #endif
206 
207 #if __TBB_TASK_ISOLATION
208  isolation_tag isolation;
210 #else
211  intptr_t reserved_space_for_task_isolation_tag;
212 #endif /* __TBB_TASK_ISOLATION */
213 
214 #if __TBB_TASK_GROUP_CONTEXT
215 
220 #endif /* __TBB_TASK_GROUP_CONTEXT */
221 
223 
229 
230 #if __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS
231  union {
232 #endif /* __TBB_TASK_PRIORITY */
233 
237 
238 #if __TBB_TASK_PRIORITY
239 
242 #endif
243 
244 #if __TBB_PREVIEW_RESUMABLE_TASKS
245  scheduler* abandoned_scheduler;
247 #endif
248 #if __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS
249  };
250 #endif /* __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS */
251 
253 
257 
259 
263  __TBB_atomic reference_count ref_count;
264 
266 
268  int depth;
269 
271 
272  unsigned char state;
273 
275 
281  unsigned char extra_state;
282 
283  affinity_id affinity;
284 
287 
289  tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
290  };
291 
292 } // namespace internal
294 
295 #if __TBB_TASK_GROUP_CONTEXT
296 
297 #if __TBB_TASK_PRIORITY
298 namespace internal {
299  static const int priority_stride_v4 = INT_MAX / 4;
300 #if __TBB_PREVIEW_CRITICAL_TASKS
301  // TODO: move into priority_t enum when critical tasks become public feature
302  static const int priority_critical = priority_stride_v4 * 3 + priority_stride_v4 / 3 * 2;
303 #endif
304 }
305 
309  priority_high = priority_normal + internal::priority_stride_v4
310 };
311 
312 #endif /* __TBB_TASK_PRIORITY */
313 
314 #if TBB_USE_CAPTURED_EXCEPTION
315  class tbb_exception;
316 #else
317  namespace internal {
318  class tbb_exception_ptr;
319  }
320 #endif /* !TBB_USE_CAPTURED_EXCEPTION */
321 
322 class task_scheduler_init;
323 namespace interface7 { class task_arena; }
325 
327 
348 private:
350  friend class task_scheduler_init;
351  friend class task_arena;
352 
353 #if TBB_USE_CAPTURED_EXCEPTION
355 #else
357 #endif
358 
360  traits_offset = 16,
361  version_mask = 0xFFFF,
362  traits_mask = 0xFFFFul << traits_offset
363  };
364 
365 public:
366  enum kind_type {
368  bound
369  };
370 
371  enum traits_type {
372  exact_exception = 0x0001ul << traits_offset,
373 #if __TBB_FP_CONTEXT
374  fp_settings = 0x0002ul << traits_offset,
375 #endif
376  concurrent_wait = 0x0004ul << traits_offset,
377 #if TBB_USE_CAPTURED_EXCEPTION
378  default_traits = 0
379 #else
380  default_traits = exact_exception
381 #endif /* !TBB_USE_CAPTURED_EXCEPTION */
382  };
383 
384 private:
385  enum state {
386  may_have_children = 1,
387  // the following enumerations must be the last, new 2^x values must go above
388  next_state_value, low_unused_state_bit = (next_state_value-1)*2
389  };
390 
391  union {
393  // TODO: describe asynchronous use, and whether any memory semantics are needed
395  uintptr_t _my_kind_aligner;
396  };
397 
400 
402 
405 
408 
410 
413  char _leading_padding[internal::NFS_MaxLineSize
414  - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
415  - sizeof(__itt_caller)
416 #if __TBB_FP_CONTEXT
418 #endif
419  ];
420 
421 #if __TBB_FP_CONTEXT
422 
426 #endif
427 
430 
432 
436 
438  exception_container_type *my_exception;
439 
442 
444  uintptr_t my_state;
445 
446 #if __TBB_TASK_PRIORITY
447  intptr_t my_priority;
449 #endif /* __TBB_TASK_PRIORITY */
450 
453 
455 
456  char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*)
457 #if __TBB_TASK_PRIORITY
458  - sizeof(intptr_t)
459 #endif /* __TBB_TASK_PRIORITY */
460  - sizeof(internal::string_index)
461  ];
462 
463 public:
465 
493  task_group_context ( kind_type relation_with_parent = bound,
494  uintptr_t t = default_traits )
495  : my_kind(relation_with_parent)
496  , my_version_and_traits(3 | t)
497  , my_name(internal::CUSTOM_CTX)
498  {
499  init();
500  }
501 
502  // Custom constructor for instrumentation of tbb algorithm
504  : my_kind(bound)
505  , my_version_and_traits(3 | default_traits)
506  , my_name(name)
507  {
508  init();
509  }
510 
511  // Do not introduce standalone unbind method since it will break state propagation assumptions
513 
515 
522  void __TBB_EXPORTED_METHOD reset ();
523 
525 
532  bool __TBB_EXPORTED_METHOD cancel_group_execution ();
533 
535  bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;
536 
538 
544  void __TBB_EXPORTED_METHOD register_pending_exception ();
545 
546 #if __TBB_FP_CONTEXT
547 
555  void __TBB_EXPORTED_METHOD capture_fp_settings ();
556 #endif
557 
558 #if __TBB_TASK_PRIORITY
559  __TBB_DEPRECATED void set_priority ( priority_t );
561 
563  __TBB_DEPRECATED priority_t priority () const;
564 #endif /* __TBB_TASK_PRIORITY */
565 
567  uintptr_t traits() const { return my_version_and_traits & traits_mask; }
568 
569 protected:
571 
572  void __TBB_EXPORTED_METHOD init ();
573 
574 private:
575  friend class task;
577 
578  static const kind_type binding_required = bound;
579  static const kind_type binding_completed = kind_type(bound+1);
580  static const kind_type detached = kind_type(binding_completed+1);
581  static const kind_type dying = kind_type(detached+1);
582 
584  template <typename T>
585  void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );
586 
588  void bind_to ( internal::generic_scheduler *local_sched );
589 
591  void register_with ( internal::generic_scheduler *local_sched );
592 
593 #if __TBB_FP_CONTEXT
594  // TODO: Consider adding #else stub in order to omit #if sections in other code
596  void copy_fp_settings( const task_group_context &src );
597 #endif /* __TBB_FP_CONTEXT */
598 }; // class task_group_context
599 
600 #endif /* __TBB_TASK_GROUP_CONTEXT */
601 
603 
605 
607  void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );
608 
610  internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();
611 
612 protected:
614  task() {prefix().extra_state=1;}
615 
616 public:
618  virtual ~task() {}
619 
621  virtual task* execute() = 0;
622 
624  enum state_type {
636  recycle
637 #if __TBB_RECYCLE_TO_ENQUEUE
638  ,to_enqueue
640 #endif
641 #if __TBB_PREVIEW_RESUMABLE_TASKS
642  ,to_resume
644 #endif
645  };
646 
647  //------------------------------------------------------------------------
648  // Allocating tasks
649  //------------------------------------------------------------------------
650 
654  }
655 
656 #if __TBB_TASK_GROUP_CONTEXT
660  }
661 #endif /* __TBB_TASK_GROUP_CONTEXT */
662 
664 
666  return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
667  }
668 
671  return *reinterpret_cast<internal::allocate_child_proxy*>(this);
672  }
673 
675  using task_base::allocate_additional_child_of;
676 
677 #if __TBB_DEPRECATED_TASK_INTERFACE
678 
683  void __TBB_EXPORTED_METHOD destroy( task& t );
684 #else /* !__TBB_DEPRECATED_TASK_INTERFACE */
685  using task_base::destroy;
687 #endif /* !__TBB_DEPRECATED_TASK_INTERFACE */
688 
689  //------------------------------------------------------------------------
690  // Recycling of tasks
691  //------------------------------------------------------------------------
692 
694 
701  __TBB_ASSERT( prefix().state==executing, "execute not running?" );
702  prefix().state = allocated;
703  }
704 
706 
709  __TBB_ASSERT( prefix().state==executing, "execute not running?" );
710  prefix().state = recycle;
711  }
712 
//! Change this task to be a child of new_parent.
/** Only valid while this task is executing (state==executing) or freshly
    allocated, and only when it has no pending children (ref_count==0) and
    no current parent. Resets the task to the allocated state and re-parents
    it so it can be spawned again as a child of new_parent. */
void recycle_as_child_of( task& new_parent ) {
    internal::task_prefix& p = prefix();
    __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
    __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
    __TBB_ASSERT( p.parent==NULL, "parent must be null" );
    __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
    __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
    p.state = allocated;
    p.parent = &new_parent;
#if __TBB_TASK_GROUP_CONTEXT
    // A child must share its parent's task_group_context (set_parent asserts the same invariant).
    p.context = new_parent.prefix().context;
#endif /* __TBB_TASK_GROUP_CONTEXT */
}
727 
729 
731  __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
732  __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
733  prefix().state = reexecute;
734  }
735 
#if __TBB_RECYCLE_TO_ENQUEUE
//! Schedule this task for re-enqueuing after the current execute() returns.
/** Only valid while execute() is running; marks the task with the
    to_enqueue state instead of letting it be destroyed. */
void recycle_to_enqueue() {
    __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
    prefix().state = to_enqueue;
}
#endif /* __TBB_RECYCLE_TO_ENQUEUE */
744 
745  //------------------------------------------------------------------------
746  // Spawning and blocking
747  //------------------------------------------------------------------------
748 
//! Set reference count.
/** With threading tools or assertions enabled the update is routed through
    the exported, instrumented internal_set_ref_count; otherwise the count
    is stored directly into the task prefix. */
void set_ref_count( int count ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
    internal_set_ref_count(count);
#else
    prefix().ref_count = count;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
}
757 
759 
761  __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
762  }
763 
765 
//! Atomically add count to the reference count and return the new value.
/** Notifies analysis tools (ITT) around the atomic update and asserts that
    the reference count never drops below zero. */
int add_ref_count( int count ) {
    internal::call_itt_notify( internal::releasing, &prefix().ref_count );
    // __TBB_FetchAndAddW returns the previous value, so add count to obtain the new one.
    internal::reference_count k = count+__TBB_FetchAndAddW( &prefix().ref_count, count );
    __TBB_ASSERT( k>=0, "task's reference count underflowed" );
    if( k==0 )
        internal::call_itt_notify( internal::acquired, &prefix().ref_count );
    return int(k);
}
774 
776 
778 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
779  return int(internal_decrement_ref_count());
780 #else
781  return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
782 #endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
783  }
784 
786  using task_base::spawn;
787 
//! Spawn the given child and wait until this task's pending children complete.
void spawn_and_wait_for_all( task& child ) {
    // Hands the child, with *this as the waiting parent, to the owning scheduler.
    prefix().owner->wait_for_all( *this, &child );
}
792 
794  void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );
795 
//! Spawn a root task and wait until it completes.
static void spawn_root_and_wait( task& root ) {
    // Passes the root and a reference to its intrusive next link to the root's owning scheduler.
    root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
}
800 
802 
804  static void spawn_root_and_wait( task_list& root_list );
805 
807 
//! Wait for reference count to become one, and set reference count to zero.
void wait_for_all() {
    // NULL child: nothing is spawned first, we only wait on *this.
    prefix().owner->wait_for_all( *this, NULL );
}
811 
813 #if __TBB_TASK_PRIORITY
814 
824 #endif /* __TBB_TASK_PRIORITY */
//! Enqueue task for starvation-resistant execution.
static void enqueue( task& t ) {
    // Second argument is the scheduler's reserved slot; unused in the priority-less case.
    t.prefix().owner->enqueue( t, NULL );
}
828 
829 #if __TBB_TASK_PRIORITY
830  __TBB_DEPRECATED static void enqueue( task& t, priority_t p ) {
832 #if __TBB_PREVIEW_CRITICAL_TASKS
834  || p == internal::priority_critical, "Invalid priority level value");
835 #else
836  __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value");
837 #endif
838  t.prefix().owner->enqueue( t, (void*)p );
839  }
840 #endif /* __TBB_TASK_PRIORITY */
841 
844 #if __TBB_TASK_PRIORITY
845  __TBB_DEPRECATED inline static void enqueue( task& t, task_arena& arena, priority_t p = priority_t(0) );
846 #else
847  inline static void enqueue( task& t, task_arena& arena);
848 #endif
849 
851  static task& __TBB_EXPORTED_FUNC self();
852 
854  task* parent() const {return prefix().parent;}
855 
//! Sets the parent task pointer to the specified value.
void set_parent(task* p) {
#if __TBB_TASK_GROUP_CONTEXT
    // A task may only be re-parented within the same task_group_context.
    __TBB_ASSERT(!p || prefix().context == p->prefix().context, "The tasks must be in the same context");
#endif
    prefix().parent = p;
}
863 
864 #if __TBB_TASK_GROUP_CONTEXT
865 
867  task_group_context* context() {return prefix().context;}
868 
870  task_group_context* group () { return prefix().context; }
871 #endif /* __TBB_TASK_GROUP_CONTEXT */
872 
//! True if task was stolen from the task pool of another thread.
bool is_stolen_task() const {
    // Bit 0x80 of extra_state flags a stolen task.
    return (prefix().extra_state & 0x80)!=0;
}

//! True if the task was enqueued (as opposed to spawned).
bool is_enqueued_task() const {
    // es_task_enqueued = 0x10
    return (prefix().extra_state & 0x10)!=0;
}
883 
884 #if __TBB_PREVIEW_RESUMABLE_TASKS
885  typedef void* suspend_point;
887 
889  template <typename F>
890  static void suspend(F f);
891 
893  static void resume(suspend_point tag);
894 #endif
895 
896  //------------------------------------------------------------------------
897  // Debugging
898  //------------------------------------------------------------------------
899 
901  state_type state() const {return state_type(prefix().state);}
902 
//! The internal reference count.
/** Returns the count as an int; with resumable tasks enabled the abandon
    flag bit is masked out first so it does not distort the numeric value.
    Under TBB_USE_ASSERT also checks that the count fits into an int. */
int ref_count() const {
#if TBB_USE_ASSERT
#if __TBB_PREVIEW_RESUMABLE_TASKS
    internal::reference_count ref_count_ = prefix().ref_count & ~internal::abandon_flag;
#else
    internal::reference_count ref_count_ = prefix().ref_count;
#endif
    __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
    return int(prefix().ref_count & ~internal::abandon_flag);
#else
    return int(prefix().ref_count);
#endif
}
919 
921  bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;
922 
923  //------------------------------------------------------------------------
924  // Affinity
925  //------------------------------------------------------------------------
926 
928 
930 
932  void set_affinity( affinity_id id ) {prefix().affinity = id;}
933 
935  affinity_id affinity() const {return prefix().affinity;}
936 
938 
942  virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );
943 
944 #if __TBB_TASK_GROUP_CONTEXT
945 
956  void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx );
957 
959 
960  bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }
961 
963  bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
964 #else
965  bool is_cancelled () const { return false; }
966 #endif /* __TBB_TASK_GROUP_CONTEXT */
967 
968 #if __TBB_TASK_PRIORITY
969  __TBB_DEPRECATED void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); }
971 
973  __TBB_DEPRECATED priority_t group_priority () const { return prefix().context->priority(); }
974 
975 #endif /* __TBB_TASK_PRIORITY */
976 
977 private:
979  friend class task_list;
980  friend class internal::scheduler;
982 #if __TBB_TASK_GROUP_CONTEXT
984 #endif /* __TBB_TASK_GROUP_CONTEXT */
988 
990 
992  return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
993  }
994 #if __TBB_PREVIEW_CRITICAL_TASKS
995  friend void internal::make_critical( task& );
996  friend bool internal::is_critical( task& );
997 #endif
998 }; // class task
999 
1000 #if __TBB_PREVIEW_CRITICAL_TASKS
namespace internal {
// Critical tasks are tagged via bit 0x8 of the per-task extra_state byte.
inline void make_critical( task& t ) { t.prefix().extra_state |= 0x8; }
inline bool is_critical( task& t ) { return bool((t.prefix().extra_state & 0x8) != 0); }
} // namespace internal
1005 #endif /* __TBB_PREVIEW_CRITICAL_TASKS */
1006 
1007 #if __TBB_PREVIEW_RESUMABLE_TASKS
namespace internal {
    //! Trampoline that restores the type of the erased user callback and invokes it.
    template <typename F>
    static void suspend_callback(void* user_callback, task::suspend_point tag) {
        // Copy user function to a new stack to avoid a race when the previous scheduler is resumed.
        F user_callback_copy = *static_cast<F*>(user_callback);
        user_callback_copy(tag);
    }
    // Exported entry points implemented inside the TBB runtime library.
    void __TBB_EXPORTED_FUNC internal_suspend(void* suspend_callback, void* user_callback);
    void __TBB_EXPORTED_FUNC internal_resume(task::suspend_point);
    task::suspend_point __TBB_EXPORTED_FUNC internal_current_suspend_point();
}
1019 
//! Suspend the current task, passing a suspend_point to the user functor f.
/** The typed trampoline suspend_callback<F> plus the address of f are handed
    to the runtime, which invokes the callback with the suspend_point tag. */
template <typename F>
inline void task::suspend(F f) {
    internal::internal_suspend((void*)internal::suspend_callback<F>, &f);
}

//! Resume execution at the given suspend_point.
inline void task::resume(suspend_point tag) {
    internal::internal_resume(tag);
}
1027 #endif
1028 
1030 
1031 class empty_task: public task {
1033  return NULL;
1034  }
1035 };
1036 
1038 namespace internal {
1039  template<typename F>
1040  class function_task : public task {
1041 #if __TBB_ALLOW_MUTABLE_FUNCTORS
1042  // TODO: deprecated behavior, remove
1043  F my_func;
1044 #else
1045  const F my_func;
1046 #endif
1048  my_func();
1049  return NULL;
1050  }
1051  public:
1052  function_task( const F& f ) : my_func(f) {}
1053 #if __TBB_CPP11_RVALUE_REF_PRESENT
1054  function_task( F&& f ) : my_func( std::move(f) ) {}
1055 #endif
1056  };
1057 } // namespace internal
1059 
1061 
1064 private:
1067  friend class task;
1069 public:
1071  task_list() : first(NULL), next_ptr(&first) {}
1072 
1075 
1077  bool empty() const {return !first;}
1078 
//! Push task onto back of list.
void push_back( task& task ) {
    // Terminate the new tail, link it after the current tail,
    // then advance the tail pointer to the new task's next link.
    task.prefix().next = NULL;
    *next_ptr = &task;
    next_ptr = &task.prefix().next;
}
1085 #if __TBB_TODO
1086  // TODO: add this method and implement&document the local execution ordering. See more in generic_scheduler::local_spawn
1088  void push_front( task& task ) {
1089  if( empty() ) {
1090  push_back(task);
1091  } else {
1092  task.prefix().next = first;
1093  first = &task;
1094  }
1095  }
1096 #endif
//! Pop the front task off the list.
task& pop_front() {
    __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
    task* result = first;
    first = result->prefix().next;
    // If the list became empty, reset the tail pointer back to the head slot.
    if( !first ) next_ptr = &first;
    return *result;
}
1105 
//! Clear the list.
/** Drops the links only; the tasks themselves are not touched. */
void clear() {
    first=NULL;
    next_ptr=&first;
}
1111 };
1112 
1114  t.prefix().owner->spawn( t, t.prefix().next );
1115 }
1116 
1118  if( task* t = list.first ) {
1119  t->prefix().owner->spawn( *t, *list.next_ptr );
1120  list.clear();
1121  }
1122 }
1123 
//! Spawn all root tasks in the list and wait for them to complete.
inline void task::spawn_root_and_wait( task_list& root_list ) {
    // Hand the whole intrusive list to the first task's owning scheduler, then reset the list.
    if( task* t = root_list.first ) {
        t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
        root_list.clear();
    }
}
1130 
1131 } // namespace tbb
1132 
1133 inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
1135 }
1136 
//! Placement-delete counterpart; the language invokes it only if the task constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
}
1140 
#if __TBB_TASK_GROUP_CONTEXT
//! Placement new that allocates a root task bound to a task_group_context via proxy p.
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    return &p.allocate(bytes);
}

//! Matching placement delete; invoked only if the task constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif /* __TBB_TASK_GROUP_CONTEXT */
1150 
//! Placement new that allocates a continuation task via proxy p.
inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
    return &p.allocate(bytes);
}

//! Matching placement delete; invoked only if the task constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
1158 
//! Placement new that allocates a child task via proxy p.
inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
    return &p.allocate(bytes);
}

//! Matching placement delete; invoked only if the task constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
1166 
//! Placement new that allocates an additional child of the proxy's designated parent.
inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    return &p.allocate(bytes);
}

//! Matching placement delete; invoked only if the task constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
1174 
1176 #undef __TBB_task_H_include_area
1177 
1178 #endif /* __TBB_task_H */
static const int priority_stride_v4
Definition: task.h:299
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
Definition: task.h:219
tbb::task * parent
The task whose reference count includes me.
Definition: task.h:256
state_type
Enumeration of task states that the scheduler considers.
Definition: task.h:624
internal::cpu_ctl_env_space my_cpu_ctl_env
Space for platform-specific FPU settings.
Definition: task.h:425
Base class for types that should not be assigned.
Definition: tbb_stddef.h:322
uintptr_t traits() const
Returns the context's traits.
Definition: task.h:567
tbb::task * next
"next" field for list of task
Definition: task.h:286
function_task(const F &f)
Definition: task.h:1052
exception_container_type * my_exception
Pointer to the container storing exception being propagated across this task group.
Definition: task.h:438
internal::affinity_id affinity_id
An id as used for specifying affinity.
Definition: task.h:929
task()
Default constructor.
Definition: task.h:614
void recycle_as_continuation()
Change this to be a continuation of its former self.
Definition: task.h:700
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d int
void recycle_to_reexecute()
Schedule this for reexecution after current execute() returns.
Definition: task.h:730
task_group_context * group()
Pointer to the task group descriptor.
Definition: task.h:870
internal::string_index my_name
Description of algorithm for scheduler based instrumentation.
Definition: task.h:452
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
Definition: task.h:435
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
Definition: task.h:263
virtual void spawn_root_and_wait(task &first, task *&next)=0
For internal use only.
internal::generic_scheduler * my_owner
Scheduler instance that registered this context in its thread specific list.
Definition: task.h:441
task * first
Definition: task.h:1065
void call_itt_notify(notify_type, void *)
void recycle_as_safe_continuation()
Recommended to use, safe variant of recycle_as_continuation.
Definition: task.h:708
task_group_context(kind_type relation_with_parent=bound, uintptr_t t=default_traits)
Default & binding constructor.
Definition: task.h:493
static void enqueue(task &t)
Enqueue task for starvation-resistant execution.
Definition: task.h:825
Base class for types that should not be copied or assigned.
Definition: tbb_stddef.h:330
bool is_enqueued_task() const
True if the task was enqueued.
Definition: task.h:879
bool is_cancelled() const
Returns true if the context has received cancellation request.
Definition: task.h:963
The graph class.
task is running, and will be destroyed after method execute() completes.
Definition: task.h:626
void set_ref_count(int count)
Set reference count.
Definition: task.h:750
#define __TBB_DEPRECATED
Definition: tbb_config.h:637
__TBB_DEPRECATED priority_t group_priority() const
Retrieves current priority of the task group this task belongs to.
Definition: task.h:973
int decrement_ref_count()
Atomically decrement reference count and returns its new value.
Definition: task.h:777
static void __TBB_EXPORTED_FUNC free(task &)
Definition: task.cpp:47
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t count
state_type state() const
Current execution state.
Definition: task.h:901
Exception container that preserves the exact copy of the original exception.
static task &__TBB_EXPORTED_FUNC allocate(size_t size)
Definition: task.cpp:35
virtual ~task()
Destructor.
Definition: task.h:618
void wait_for_all()
Wait for reference count to become one, and set reference count to zero.
Definition: task.h:808
#define __TBB_TASK_BASE_ACCESS
Definition: task.h:41
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Definition: task.cpp:128
task is in ready pool, or is going to be put there, or was just taken off.
Definition: task.h:630
auto first(Container &c) -> decltype(begin(c))
unsigned char state
A task::state_type, stored as a byte for compactness.
Definition: task.h:272
task_group_context(internal::string_index name)
Definition: task.h:503
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
Definition: task.h:394
internal::tbb_exception_ptr exception_container_type
Definition: task.h:356
void push_back(task &task)
Push task onto back of list.
Definition: task.h:1080
void make_critical(task &t)
Definition: task.h:1002
bool cancel_group_execution()
Initiates cancellation of all tasks in this cancellation group and its subordinate groups...
Definition: task.h:960
static const int priority_critical
Definition: task.h:302
#define __TBB_atomic
Definition: tbb_stddef.h:237
uintptr_t my_cancellation_requested
Specifies whether cancellation was requested for this task group.
Definition: task.h:429
tbb::task & task()
The task corresponding to this task_prefix.
Definition: task.h:289
internal::allocate_continuation_proxy & allocate_continuation()
Returns proxy for overloaded new that allocates a continuation task of *this.
Definition: task.h:665
void const char const char int ITT_FORMAT __itt_group_sync x void const char * name
struct ___itt_caller * __itt_caller
Definition: task.h:28
scheduler * owner
Obsolete. The scheduler that owns the task.
Definition: task.h:236
void move(tbb_thread &t1, tbb_thread &t2)
Definition: tbb_thread.h:319
task_group_context * context()
This method is deprecated and will be removed in the future.
Definition: task.h:867
task_list()
Construct empty list.
Definition: task.h:1071
Interface to be implemented by all exceptions TBB recognizes and propagates across the threads...
Memory prefix to a task object.
Definition: task.h:192
internal::context_list_node_t my_node
Used to form the thread specific list of contexts without additional memory allocation.
Definition: task.h:404
bool is_stolen_task() const
True if task was stolen from the task pool of another thread.
Definition: task.h:874
intptr_t isolation_tag
A tag for task isolation.
Definition: task.h:132
affinity_id affinity
Definition: task.h:283
task * self
No longer used, but retained for binary layout compatibility. Always NULL.
Definition: task.h:48
#define __TBB_EXPORTED_FUNC
task ** next_ptr
Definition: task.h:1066
uintptr_t my_state
Internal state (combination of state flags, currently only may_have_children).
Definition: task.h:444
__itt_caller itt_caller
Used to set and maintain stack stitching point for Intel Performance Tools.
Definition: task.h:407
STL namespace.
intptr_t reference_count
A reference count.
Definition: task.h:120
allocate_root_with_context_proxy(task_group_context &ctx)
Definition: task.h:147
void set_parent(task *p)
sets parent task pointer to specified value
Definition: task.h:857
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task * task
task that does nothing. Useful for synchronization.
Definition: task.h:1031
uintptr_t _my_kind_aligner
Definition: task.h:395
int depth
Obsolete. Used to be scheduling depth before TBB 2.2.
Definition: task.h:268
void increment_ref_count()
Atomically increment reference count.
Definition: task.h:760
static void spawn(task &t)
Schedule task for execution when a worker becomes available.
Definition: task.h:1113
void clear()
Clear the list.
Definition: task.h:1107
int ref_count() const
The internal reference count.
Definition: task.h:904
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
Definition: tbb_stddef.h:216
static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of(task &t)
Like allocate_child, except that task's parent becomes "t", not this.
Definition: task.h:82
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
Definition: task.h:281
affinity_id affinity() const
Current affinity of this task.
Definition: task.h:935
unsigned short affinity_id
An id as used for specifying affinity.
Definition: task.h:128
task * execute() __TBB_override
Should be overridden by derived classes.
Definition: task.h:1032
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
Definition: task.h:652
scheduler * origin
The scheduler that allocated the task, or NULL if the task is big.
Definition: task.h:228
virtual void spawn(task &first, task *&next)=0
For internal use only.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id id
bool empty() const
True if list is empty; false otherwise.
Definition: task.h:1077
#define __TBB_override
Definition: tbb_stddef.h:240
task object is on free list, or is going to be put there, or was just taken off.
Definition: task.h:634
static void spawn_root_and_wait(task &root)
Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.
Definition: task.h:797
#define __TBB_FetchAndDecrementWrelease(P)
Definition: tbb_machine.h:314
void set_affinity(affinity_id id)
Set affinity for this task.
Definition: task.h:932
void __TBB_EXPORTED_METHOD free(task &) const
Definition: task.cpp:134
#define __TBB_EXPORTED_METHOD
Definition: tbb_stddef.h:98
void recycle_as_child_of(task &new_parent)
Change this to be a child of new_parent.
Definition: task.h:714
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
Definition: tbb_stddef.h:398
Used to form groups of tasks.
Definition: task.h:347
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
int add_ref_count(int count)
Atomically adds to reference count and returns its new value.
Definition: task.h:766
Class delimiting the scope of task scheduler activity.
task * next_offloaded
Pointer to the next offloaded lower priority task.
Definition: task.h:241
#define __TBB_FetchAndIncrementWacquire(P)
Definition: tbb_machine.h:313
task_group_context * my_parent
Pointer to the context of the parent cancellation group. NULL for isolated contexts.
Definition: task.h:399
task to be rescheduled.
Definition: task.h:628
Base class for methods that became static in TBB 3.0.
Definition: task.h:69
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
Definition: task.h:991
void spawn_and_wait_for_all(task &child)
Similar to spawn followed by wait_for_all, but more efficient.
Definition: task.h:789
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t size
void const char const char int ITT_FORMAT __itt_group_sync p
task * execute() __TBB_override
Should be overridden by derived classes.
Definition: task.h:1047
context_list_node_t * my_prev
Definition: task.h:140
A list of children.
Definition: task.h:1063
internal::allocate_child_proxy & allocate_child()
Returns proxy for overloaded new that allocates a child task of *this.
Definition: task.h:670
const isolation_tag no_isolation
Definition: task.h:133
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
Definition: task.h:854
Base class for user-defined tasks.
Definition: task.h:604
~task_list()
Destroys the list, but does not destroy the task objects.
Definition: task.h:1074
Work stealing task scheduler.
Definition: scheduler.h:137
virtual void enqueue(task &t, void *reserved)=0
For internal use only.
task object is freshly allocated or recycled.
Definition: task.h:632
priority_t
Definition: task.h:306
bool is_critical(task &t)
Definition: task.h:1003

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.