21 #ifndef _TBB_task_stream_extended_H 22 #define _TBB_task_stream_extended_H 32 #if _TBB_task_stream_H 33 #error task_stream.h and this file cannot both be included at the same time. 37 #error This code bears a preview status until it proves its usefulness/performance suitability. 55 template<
typename T,
typename mutex_t >
56 struct queue_and_mutex {
84 return (val & (
one<<pos)) != 0;
88 #if __INTEL_COMPILER == 1110 || __INTEL_COMPILER == 1500 96 __TBB_ASSERT( ((out_of-1) & out_of) == 0,
"number of lanes is not power of two." );
104 #if __INTEL_COMPILER == 1110 || __INTEL_COMPILER == 1500 117 __TBB_ASSERT( ((out_of-1) & out_of) == 0,
"number of lanes is not power of two." );
125 __TBB_ASSERT( ((out_of-1) & out_of) == 0,
"number of lanes is not power of two." );
139 template<task_stream_accessor_type accessor>
144 task* result = queue.front();
156 result = queue.back();
158 }
while( !result && !queue.empty() );
164 template<
int Levels, task_stream_accessor_type accessor>
173 for(
int level = 0; level < Levels; level++) {
180 const unsigned max_lanes =
sizeof(
population_t) * CHAR_BIT;
182 N = n_lanes>=max_lanes ? max_lanes : n_lanes>2 ? 1<<(
__TBB_Log2(n_lanes-1)+1) : 2;
183 __TBB_ASSERT(
N==max_lanes ||
N>=n_lanes && ((
N-1)&
N)==0,
"number of lanes miscalculated");
185 for(
int level = 0; level < Levels; level++) {
192 for(
int level = 0; level < Levels; level++)
198 __TBB_ASSERT( 0 <= level && level < Levels,
"Incorrect lane level specified." );
200 if(
lock.try_acquire(
lanes[level][lane_idx].my_mutex ) ) {
201 lanes[level][lane_idx].my_queue.push_back( source );
209 template<
typename lane_selector_t>
210 void push(
task* source,
int level,
const lane_selector_t& next_lane ) {
211 bool succeed =
false;
214 lane = next_lane(
N );
216 }
while( ! (succeed =
try_push( source, level, lane )) );
221 __TBB_ASSERT( 0 <= level && level < Levels,
"Incorrect lane level specified." );
237 template<
typename lane_selector_t>
238 task*
pop(
int level,
const lane_selector_t& next_lane ) {
242 lane = next_lane(
N );
244 }
while( !
empty( level ) && !(popped =
try_pop( level, lane )) );
253 typename lane_t::queue_base_t::iterator curr = queue.end();
256 task* result = *--curr;
258 if( queue.end() - curr == 1 )
265 }
while( curr != queue.begin() );
273 unsigned idx = last_used_lane & (
N-1);
287 }
while( !
empty(level) && idx != last_used_lane );
288 last_used_lane = idx;
302 for(
int level = 0; level < Levels; level++)
303 for(
unsigned i=0; i<
N; ++i) {
306 for(
typename lane_t::queue_base_t::iterator it=lane.
my_queue.begin();
307 it!=lane.
my_queue.end(); ++it, ++result)
311 tbb::task::destroy(*t);
intptr_t drain()
Destroys all remaining tasks in every lane. Returns the number of destroyed tasks.
The container for "fairness-oriented" aka "enqueued" tasks.
void set_one_bit(population_t &dest, int pos)
void initialize(unsigned n_lanes)
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
#define __TBB_ISOLATION_EXPR(isolation)
A fast random number generator.
void push(task *source, int level, const lane_selector_t &next_lane)
Push a task into a lane. Lane selection is performed by passed functor.
task * pop_specific(int level, __TBB_ISOLATION_ARG(unsigned &last_used_lane, isolation_tag isolation))
Try finding and popping a related task.
Base class for types that should not be copied or assigned.
task * get_item(lane_t::queue_base_t &queue)
void __TBB_AtomicAND(volatile void *operand, uintptr_t addend)
Base class for user-defined tasks.
padded< lane_t > * lanes[Levels]
preceding_lane_selector(unsigned &previous)
intptr_t isolation_tag
A tag for task isolation.
Base class for types that should not be assigned.
#define __TBB_ISOLATION_ARG(arg1, isolation)
population_t population[Levels]
intptr_t __TBB_Log2(uintptr_t x)
unsigned operator()(unsigned out_of) const
bool empty(int level)
Checks existence of a task.
task * get_item(lane_t::queue_base_t &queue)
bool is_bit_set(population_t val, int pos)
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void * lock
unsigned operator()(unsigned out_of) const
queue_and_mutex< task *, spin_mutex > lane_t
unsigned short get()
Get a random number.
Represents acquisition of a mutex.
void clear_one_bit(population_t &dest, int pos)
Essentially, this is just a pair of a queue and a mutex to protect the queue.
std::deque< T, tbb_allocator< T > > queue_base_t
random_lane_selector(FastRandom &random)
task_stream_accessor< accessor >::lane_t lane_t
unsigned operator()(unsigned out_of) const
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
bool try_push(task *source, int level, unsigned lane_idx)
Returns true on successful push, otherwise - false.
lane_selector_base(unsigned &previous)
task * try_pop(int level, unsigned lane_idx)
Returns pointer to task on successful pop, otherwise - NULL.
task * pop(int level, const lane_selector_t &next_lane)
subsequent_lane_selector(unsigned &previous)
task * look_specific(__TBB_ISOLATION_ARG(task_stream_base::lane_t::queue_base_t &queue, isolation_tag isolation))
void __TBB_AtomicOR(volatile void *operand, uintptr_t addend)
task_stream_accessor_type
Pads type T to fill out to a multiple of cache line size.