#ifndef _TBB_task_stream_H
#define _TBB_task_stream_H

#include <deque>
#include <climits> // for CHAR_BIT

//! Essentially, this is just a pair of a queue and a mutex to protect the queue.
template< typename T, typename mutex_t >
struct queue_and_mutex {
    typedef std::deque< T, tbb_allocator<T> > queue_base_t;

    queue_base_t my_queue;
    mutex_t      my_mutex;
};
typedef uintptr_t population_t;
const population_t one = 1;

inline void set_one_bit( population_t& dest, int pos ) {
    __TBB_ASSERT( pos < int(sizeof(population_t)*CHAR_BIT), NULL );
    __TBB_AtomicOR( &dest, one<<pos );
}
inline void clear_one_bit( population_t& dest, int pos ) {
    __TBB_ASSERT( pos < int(sizeof(population_t)*CHAR_BIT), NULL );
    __TBB_AtomicAND( &dest, ~(one<<pos) );
}
inline bool is_bit_set( population_t val, int pos ) {
    __TBB_ASSERT( pos < int(sizeof(population_t)*CHAR_BIT), NULL );
    return (val & (one<<pos)) != 0;
}
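The three helpers above maintain a per-level bitmask in which bit k is set exactly when lane k may hold tasks: push sets the bit, pop clears it once it empties a lane. A minimal stand-alone sketch of the same idea, assuming C++11 std::atomic in place of the __TBB_AtomicOR/__TBB_AtomicAND intrinsics (the names mask_t, set_bit, clear_bit, and test_bit are illustrative, not TBB's):

    #include <atomic>
    #include <cassert>
    #include <climits>
    #include <cstdint>

    typedef std::uintptr_t mask_t;

    // Atomic OR, analogous to __TBB_AtomicOR on population_t.
    inline void set_bit( std::atomic<mask_t>& dest, int pos ) {
        assert( pos >= 0 && pos < int(sizeof(mask_t)*CHAR_BIT) );
        dest.fetch_or( mask_t(1) << pos );
    }
    // Atomic AND with the complement, analogous to __TBB_AtomicAND.
    inline void clear_bit( std::atomic<mask_t>& dest, int pos ) {
        assert( pos >= 0 && pos < int(sizeof(mask_t)*CHAR_BIT) );
        dest.fetch_and( ~(mask_t(1) << pos) );
    }
    inline bool test_bit( mask_t val, int pos ) {
        return ( val & (mask_t(1) << pos) ) != 0;
    }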
//! The container for "fairness-oriented" aka "enqueued" tasks.
template< int Levels >
class task_stream : no_copy {
    typedef queue_and_mutex< task*, spin_mutex > lane_t;
    population_t population[Levels];
    padded<lane_t>* lanes[Levels];
    unsigned N;

public:
    task_stream() : N() {
        for( int level = 0; level < Levels; level++ ) {
            population[level] = 0;
            lanes[level] = NULL;
        }
    }
    void initialize( unsigned n_lanes ) {
        const unsigned max_lanes = sizeof(population_t) * CHAR_BIT;

        N = n_lanes >= max_lanes ? max_lanes
          : n_lanes > 2 ? 1 << (__TBB_Log2(n_lanes-1) + 1)
          : 2;
        __TBB_ASSERT( N == max_lanes || (N >= n_lanes && ((N-1)&N) == 0),
                      "number of lanes miscalculated" );
        __TBB_ASSERT( N <= sizeof(population_t) * CHAR_BIT, NULL );
        for( int level = 0; level < Levels; level++ ) {
            lanes[level] = new padded<lane_t>[N];
            __TBB_ASSERT( !population[level], NULL );
        }
    }
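The rounding in initialize() can be checked by hand: for n_lanes > 2 it yields the smallest power of two at or above n_lanes (e.g. n_lanes = 6 gives __TBB_Log2(5) = 2, hence N = 1<<3 = 8), it clamps at max_lanes so each lane still maps to one bit of population_t, and it never drops below 2. A stand-alone sketch of the same computation, with a hypothetical floor_log2 standing in for __TBB_Log2:

    #include <climits>
    #include <cstdint>

    // Hypothetical stand-in for __TBB_Log2: floor(log2(x)) for x >= 1.
    static int floor_log2( std::uintptr_t x ) {
        int result = -1;
        while( x ) { ++result; x >>= 1; }
        return result;
    }

    static unsigned round_lane_count( unsigned n_lanes ) {
        const unsigned max_lanes = sizeof(std::uintptr_t) * CHAR_BIT; // one bit per lane
        return n_lanes >= max_lanes ? max_lanes
             : n_lanes > 2         ? 1u << (floor_log2(n_lanes - 1) + 1)
             :                       2;
    }
    // round_lane_count(6) == 8, round_lane_count(8) == 8, round_lane_count(2) == 2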
    ~task_stream() {
        for( int level = 0; level < Levels; level++ )
            if( lanes[level] )
                delete[] lanes[level];
    }
    //! Push a task into a lane.
    void push( task* source, int level, FastRandom& random ) {
        // Lane selection is random, retrying if the chosen lane is busy.
        for( ;; ) {
            unsigned idx = random.get() & (N-1);
            spin_mutex::scoped_lock lock;
            if( lock.try_acquire( lanes[level][idx].my_mutex ) ) {
                lanes[level][idx].my_queue.push_back( source );
                set_one_bit( population[level], idx );
                break;
            }
        }
    }
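push() uses a simple contention-avoidance pattern: choose a lane at random and, if its mutex is already held, retry with a fresh random index rather than block on it. A minimal sketch of the pattern using standard C++ primitives instead of TBB's spin_mutex and FastRandom (the lane type, push_random_lane, and the int payload are illustrative):

    #include <deque>
    #include <mutex>
    #include <random>

    struct lane {
        std::deque<int> queue;
        std::mutex      mtx;
    };

    // Assumes N is a power of two, so `value & (N-1)` is a cheap modulo.
    void push_random_lane( lane* lanes, unsigned N, int item, std::minstd_rand& rng ) {
        for( ;; ) {
            unsigned idx = unsigned(rng()) & (N - 1);
            if( lanes[idx].mtx.try_lock() ) {   // non-blocking, like lock.try_acquire
                lanes[idx].queue.push_back( item );
                lanes[idx].mtx.unlock();
                return;  // the real code also sets the lane's population bit here
            }
            // Lane busy: loop and try another random lane instead of waiting.
        }
    }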
    //! Try finding and popping a task.
    task* pop( int level, unsigned& last_used_lane ) {
        task* result = NULL;
        // Lane selection is round-robin, starting after the last used lane.
        unsigned idx = (last_used_lane+1) & (N-1);
        for( ; population[level]; idx = (idx+1) & (N-1) ) {
            if( is_bit_set( population[level], idx ) ) {
                lane_t& lane = lanes[level][idx];
                spin_mutex::scoped_lock lock;
                if( lock.try_acquire( lane.my_mutex ) && !lane.my_queue.empty() ) {
                    result = lane.my_queue.front();
                    lane.my_queue.pop_front();
                    if( lane.my_queue.empty() )
                        clear_one_bit( population[level], idx );
                    break;
                }
            }
        }
        last_used_lane = idx;
        return result;
    }
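pop() scans lanes round-robin starting just past the lane it used last time, so concurrent consumers spread out instead of all probing lane 0 first; because N is a power of two, (idx+1)&(N-1) wraps the index without a division, and the loop exits as soon as population[level] reports every lane at this level empty. A small runnable illustration of the wrap-around scan order:

    #include <cstdio>

    int main() {
        const unsigned N = 8;                  // power of two, as initialize() guarantees
        unsigned last_used_lane = 6;
        unsigned idx = (last_used_lane + 1) & (N - 1);
        for( unsigned step = 0; step < N; ++step ) {
            std::printf( "%u ", idx );         // prints: 7 0 1 2 3 4 5 6
            idx = (idx + 1) & (N - 1);
        }
        return 0;
    }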
    //! Checks existence of a task.
    bool empty( int level ) {
        return !population[level];
    }
    //! Destroys all remaining tasks in every lane. Returns the number of destroyed tasks.
    intptr_t drain() {
        intptr_t result = 0;
        for( int level = 0; level < Levels; level++ )
            for( unsigned i = 0; i < N; ++i ) {
                lane_t& lane = lanes[level][i];
                spin_mutex::scoped_lock lock( lane.my_mutex );
                for( lane_t::queue_base_t::iterator it = lane.my_queue.begin();
                     it != lane.my_queue.end(); ++it, ++result )
                {
                    task* t = *it;
                    tbb::task::destroy( *t );
                }
                lane.my_queue.clear();
                clear_one_bit( population[level], i );
            }
        return result;
    }
}; // class task_stream

#endif /* _TBB_task_stream_H */
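Taken together, a caller inside the scheduler would use the container roughly as follows. This is a hypothetical sketch (the real call sites live in the TBB scheduler, and FastRandom construction and seeding are elided), assuming a two-level stream:

    task_stream<2> stream;
    stream.initialize( n_workers );            // lane count rounded up to a power of two

    // Producer side: every thread keeps its own FastRandom instance.
    stream.push( &my_task, /*level=*/0, my_random );

    // Consumer side: every thread remembers its last used lane per level.
    unsigned last_lane = 0;
    if( task* t = stream.pop( /*level=*/0, last_lane ) ) {
        // ... execute t ...
    }

    // Shutdown: destroy whatever was never executed.
    intptr_t leftovers = stream.drain();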
Referenced symbols:

void initialize(unsigned n_lanes)

padded< T >
    Pads type T to fill out to a multiple of cache line size.

bool try_acquire(spin_mutex &m)
    Try acquiring lock (non-blocking).

queue_and_mutex< task *, spin_mutex > lane_t

no_copy
    Base class for types that should not be copied or assigned.

intptr_t drain()
    Destroys all remaining tasks in every lane. Returns the number of destroyed tasks.

bool empty(int level)
    Checks existence of a task.

void __TBB_AtomicOR(volatile void *operand, uintptr_t addend)

void __TBB_AtomicAND(volatile void *operand, uintptr_t addend)

task * pop(int level, unsigned &last_used_lane)
    Try finding and popping a task.

task_stream< Levels >
    The container for "fairness-oriented" aka "enqueued" tasks.

bool is_bit_set(population_t val, int pos)

void push(task *source, int level, FastRandom &random)
    Push a task into a lane.

intptr_t __TBB_Log2(uintptr_t x)

spin_mutex::scoped_lock
    Represents acquisition of a mutex.

queue_and_mutex< T, mutex_t >
    Essentially, this is just a pair of a queue and a mutex to protect the queue.

FastRandom
    A fast random number generator.

#define __TBB_ASSERT(predicate, comment)
    No-op version of __TBB_ASSERT.

void clear_one_bit(population_t &dest, int pos)

void set_one_bit(population_t &dest, int pos)
task
    Base class for user-defined tasks.

unsigned short get()
    Get a random number.

std::deque< T, tbb_allocator< T > > queue_base_t