30 #if __TBB_TASK_PRIORITY 32 arena *&next = my_priority_levels[a.my_top_priority].next_arena;
38 if ( arenas.
size() == 1 )
39 next = &*arenas.
begin();
43 #if __TBB_TASK_PRIORITY 45 arena *&next = my_priority_levels[a.my_top_priority].next_arena;
50 arena_list_type::iterator it = next;
53 if ( ++it == arenas.
end() && arenas.
size() > 1 )
64 market::market (
unsigned workers_soft_limit,
unsigned workers_hard_limit,
size_t stack_size )
65 : my_num_workers_hard_limit(workers_hard_limit)
66 , my_num_workers_soft_limit(workers_soft_limit)
68 , my_global_top_priority(normalized_normal_priority)
69 , my_global_bottom_priority(normalized_normal_priority)
72 , my_stack_size(stack_size)
73 , my_workers_soft_limit_to_report(workers_soft_limit)
75 #if __TBB_TASK_PRIORITY 88 workers_soft_limit = soft_limit-1;
91 if( workers_soft_limit >= workers_hard_limit )
92 workers_soft_limit = workers_hard_limit-1;
93 return workers_soft_limit;
103 if( old_public_count==0 )
109 "skip_soft_limit_warning must be larger than any valid workers_requested" );
111 if( soft_limit_to_report < workers_requested ) {
113 "The request for %u workers is ignored. Further requests for more workers " 114 "will be silently ignored until the limit changes.\n",
115 soft_limit_to_report, workers_requested );
125 "The request for larger stack (%u) cannot be satisfied.\n",
130 if( stack_size == 0 )
144 #if __TBB_TASK_GROUP_CONTEXT 146 "my_workers must be the last data field of the market class");
151 memset( storage, 0,
size );
153 m =
new (storage)
market( workers_soft_limit, workers_hard_limit, stack_size );
159 runtime_warning(
"RML might limit the number of workers to %u while %u is requested.\n" 160 , m->
my_server->default_concurrency(), workers_soft_limit );
166 #if __TBB_COUNT_TASK_NODES 167 if ( my_task_node_count )
168 runtime_warning(
"Leaked %ld task objects\n", (
long)my_task_node_count );
170 this->market::~market();
177 bool do_release =
false;
180 if ( blocking_terminate ) {
181 __TBB_ASSERT( is_public,
"Only an object with a public reference can request the blocking terminate" );
212 return blocking_terminate;
218 int old_requested=0, requested=0;
219 bool need_mandatory =
false;
237 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 240 if( !(m->my_mandatory_num_requested && !soft_limit) )
244 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 245 m->my_mandatory_num_requested? 0 :
248 requested =
min(demand, (
int)soft_limit);
251 #if __TBB_TASK_PRIORITY 252 m->my_priority_levels[m->my_global_top_priority].workers_available = soft_limit;
258 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 259 if( !m->my_mandatory_num_requested && !soft_limit ) {
262 #if __TBB_TASK_PRIORITY 263 for(
int p = m->my_global_top_priority;
p >= m->my_global_bottom_priority; --
p ) {
264 priority_level_info &pl = m->my_priority_levels[
p];
270 for( arena_list_type::iterator it = arenas.
begin(); it != arenas.
end(); ++it ) {
271 if( !it->my_task_stream.empty(
p) ) {
273 if( m->mandatory_concurrency_enable_impl( &*it ) )
274 need_mandatory =
true;
277 #if __TBB_TASK_PRIORITY 284 int delta = requested - old_requested;
285 if( need_mandatory ) ++delta;
287 m->
my_server->adjust_job_count_estimate( delta );
293 return ((
const market&)client).must_join_workers();
326 #if __TBB_TASK_PRIORITY 330 priority_level_info &pl = my_priority_levels[
p];
336 if ( it->my_aba_epoch == aba_epoch ) {
352 #if __TBB_TASK_PRIORITY 360 if ( arenas.
empty() )
362 arena_list_type::iterator it = hint;
366 if ( ++it == arenas.
end() )
369 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 370 && !a.recall_by_mandatory_request()
376 }
while ( it != hint );
382 max_workers =
min(workers_demand, max_workers);
385 arena_list_type::iterator it = arenas.
begin();
386 for ( ; it != arenas.
end(); ++it ) {
393 int allotted = tmp / workers_demand;
394 carry = tmp % workers_demand;
397 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 398 if ( !allotted && a.must_have_concurrency() )
402 assigned += allotted;
404 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 415 for ( arena_list_type::iterator it = arenas.
begin(); it != arenas.
end(); ++it )
422 #if __TBB_TASK_PRIORITY 423 inline void market::update_global_top_priority ( intptr_t newPriority ) {
425 my_global_top_priority = newPriority;
426 my_priority_levels[newPriority].workers_available =
427 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 431 advance_global_reload_epoch();
434 inline void market::reset_global_priority () {
435 my_global_bottom_priority = normalized_normal_priority;
436 update_global_top_priority(normalized_normal_priority);
444 int p = my_global_top_priority;
452 while ( !a &&
p >= my_global_bottom_priority ) {
453 priority_level_info &pl = my_priority_levels[
p--];
466 intptr_t i = highest_affected_priority;
467 int available = my_priority_levels[i].workers_available;
468 for ( ; i >= my_global_bottom_priority; --i ) {
469 priority_level_info &pl = my_priority_levels[i];
470 pl.workers_available = available;
471 if ( pl.workers_requested ) {
472 available -=
update_allotment( pl.arenas, pl.workers_requested, available );
473 if ( available < 0 ) {
479 __TBB_ASSERT( i <= my_global_bottom_priority || !available, NULL );
480 for ( --i; i >= my_global_bottom_priority; --i ) {
481 priority_level_info &pl = my_priority_levels[i];
482 pl.workers_available = 0;
483 arena_list_type::iterator it = pl.arenas.begin();
484 for ( ; it != pl.arenas.end(); ++it ) {
485 __TBB_ASSERT( it->my_num_workers_requested >= 0 || !it->my_num_workers_allotted, NULL );
486 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 487 it->my_num_workers_allotted = it->must_have_concurrency() ? 1 : 0;
489 it->my_num_workers_allotted = 0;
496 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 497 bool market::mandatory_concurrency_enable_impl ( arena *a,
bool *enabled ) {
498 if( a->my_concurrency_mode==arena_base::cm_enforced_global ) {
505 a->my_max_num_workers = 1;
506 a->my_concurrency_mode = arena_base::cm_enforced_global;
507 #if __TBB_TASK_PRIORITY 508 priority_level_info &pl = my_priority_levels[a->my_top_priority];
509 pl.workers_requested++;
510 if( my_global_top_priority < a->my_top_priority ) {
511 my_global_top_priority = a->my_top_priority;
512 advance_global_reload_epoch();
515 a->my_num_workers_requested++;
516 a->my_num_workers_allotted++;
517 if( 1 == ++my_mandatory_num_requested ) {
524 bool market::mandatory_concurrency_enable ( arena *a ) {
529 add_thread = mandatory_concurrency_enable_impl(a, &enabled);
532 my_server->adjust_job_count_estimate( 1 );
536 void market::mandatory_concurrency_disable ( arena *a ) {
537 bool remove_thread =
false;
538 int delta_adjust_demand = 0;
543 if( a->my_concurrency_mode!=arena_base::cm_enforced_global )
546 a->my_max_num_workers = 0;
547 #if __TBB_TASK_PRIORITY 548 if ( a->my_top_priority != normalized_normal_priority ) {
549 update_arena_top_priority( *a, normalized_normal_priority );
551 a->my_bottom_priority = normalized_normal_priority;
554 int val = --my_mandatory_num_requested;
558 remove_thread =
true;
560 a->my_num_workers_requested--;
561 if (a->my_num_workers_requested > 0)
562 delta_adjust_demand = a->my_num_workers_requested;
564 a->my_num_workers_allotted = 0;
566 #if __TBB_TASK_PRIORITY 567 priority_level_info &pl = my_priority_levels[a->my_top_priority];
568 pl.workers_requested--;
569 intptr_t
p = my_global_top_priority;
570 for (; !my_priority_levels[
p].workers_requested &&
p>0;
p--)
573 reset_global_priority();
574 else if(
p!= my_global_top_priority )
575 update_global_top_priority(
p);
577 a->my_concurrency_mode = arena::cm_normal;
579 if( delta_adjust_demand )
582 my_server->adjust_job_count_estimate( -1 );
594 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 596 if ( a.
my_market->my_mandatory_num_requested && a.my_concurrency_mode!=arena_base::cm_normal )
601 if ( prev_req <= 0 ) {
607 else if ( prev_req < 0 ) {
611 #if !__TBB_TASK_PRIORITY 614 intptr_t
p = a.my_top_priority;
615 priority_level_info &pl = my_priority_levels[
p];
616 pl.workers_requested += delta;
619 if ( a.my_top_priority != normalized_normal_priority ) {
621 update_arena_top_priority( a, normalized_normal_priority );
623 a.my_bottom_priority = normalized_normal_priority;
625 if (
p == my_global_top_priority ) {
626 if ( !pl.workers_requested ) {
627 while ( --
p >= my_global_bottom_priority && !my_priority_levels[
p].workers_requested )
629 if (
p < my_global_bottom_priority )
630 reset_global_priority();
632 update_global_top_priority(
p);
636 else if (
p > my_global_top_priority ) {
640 update_global_top_priority(
p);
642 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 645 && a.
my_market->my_mandatory_num_requested && a.my_concurrency_mode!=arena_base::cm_normal )
651 else if (
p == my_global_bottom_priority ) {
652 if ( !pl.workers_requested ) {
653 while ( ++
p <= my_global_top_priority && !my_priority_levels[
p].workers_requested )
655 if (
p > my_global_top_priority )
656 reset_global_priority();
658 my_global_bottom_priority =
p;
663 else if (
p < my_global_bottom_priority ) {
664 int prev_bottom = my_global_bottom_priority;
665 my_global_bottom_priority =
p;
669 __TBB_ASSERT( my_global_bottom_priority <
p &&
p < my_global_top_priority, NULL );
690 my_server->adjust_job_count_estimate( delta );
700 for (
int i = 0; i < 2; ++i) {
740 #if __TBB_TASK_GROUP_CONTEXT 743 my_workers[index - 1] =
s;
748 #if __TBB_TASK_PRIORITY 749 void market::update_arena_top_priority (
arena& a, intptr_t new_priority ) {
751 __TBB_ASSERT( a.my_top_priority != new_priority, NULL );
752 priority_level_info &prev_level = my_priority_levels[a.my_top_priority],
753 &new_level = my_priority_levels[new_priority];
755 a.my_top_priority = new_priority;
760 __TBB_ASSERT( prev_level.workers_requested >= 0 && new_level.workers_requested >= 0, NULL );
763 bool market::lower_arena_priority (
arena& a, intptr_t new_priority, uintptr_t old_reload_epoch ) {
766 if ( a.my_reload_epoch != old_reload_epoch ) {
771 __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL );
773 intptr_t
p = a.my_top_priority;
774 update_arena_top_priority( a, new_priority );
776 if ( my_global_bottom_priority > new_priority ) {
777 my_global_bottom_priority = new_priority;
779 if (
p == my_global_top_priority && !my_priority_levels[
p].workers_requested ) {
781 for ( --
p;
p>my_global_bottom_priority && !my_priority_levels[
p].workers_requested; --
p )
continue;
782 update_global_top_priority(
p);
787 __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL );
792 bool market::update_arena_priority ( arena& a, intptr_t new_priority ) {
796 tbb::internal::assert_priority_valid(new_priority);
797 __TBB_ASSERT( my_global_top_priority >= a.my_top_priority || a.my_num_workers_requested <= 0, NULL );
799 if ( a.my_top_priority == new_priority ) {
802 else if ( a.my_top_priority > new_priority ) {
803 if ( a.my_bottom_priority > new_priority )
804 a.my_bottom_priority = new_priority;
807 else if ( a.my_num_workers_requested <= 0 ) {
811 __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL );
813 intptr_t
p = a.my_top_priority;
814 intptr_t highest_affected_level =
max(
p, new_priority);
815 update_arena_top_priority( a, new_priority );
817 if ( my_global_top_priority < new_priority ) {
818 update_global_top_priority(new_priority);
820 else if ( my_global_top_priority == new_priority ) {
821 advance_global_reload_epoch();
824 __TBB_ASSERT( new_priority < my_global_top_priority, NULL );
825 __TBB_ASSERT( new_priority > my_global_bottom_priority, NULL );
826 if (
p == my_global_top_priority && !my_priority_levels[
p].workers_requested ) {
829 for ( --
p; !my_priority_levels[
p].workers_requested; --
p )
continue;
831 update_global_top_priority(
p);
832 highest_affected_level =
p;
835 if (
p == my_global_bottom_priority ) {
838 __TBB_ASSERT( new_priority <= my_global_top_priority, NULL );
839 while ( my_global_bottom_priority < my_global_top_priority
840 && !my_priority_levels[my_global_bottom_priority].workers_requested )
841 ++my_global_bottom_priority;
842 __TBB_ASSERT( my_global_bottom_priority <= new_priority, NULL );
843 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 844 const bool enforced_concurrency = my_mandatory_num_requested && a.must_have_concurrency();
846 const bool enforced_concurrency =
false;
848 __TBB_ASSERT_EX( enforced_concurrency || my_priority_levels[my_global_bottom_priority].workers_requested > 0, NULL );
852 __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL );
unsigned my_public_ref_count
Count of master threads attached.
void process(job &j) __TBB_override
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
bool is_arena_in_list(arena_list_type &arenas, arena *a)
void try_destroy_arena(arena *, uintptr_t aba_epoch)
Removes the arena from the market's list.
static void remove_ref()
Remove reference to resources. If last reference removed, release the resources.
static rml::tbb_server * create_rml_server(rml::tbb_client &)
static market * theMarket
Currently active global market.
#define __TBB_offsetof(class_name, member_name)
Extended variant of the standard offsetof macro.
static const intptr_t num_priority_levels
void unlock()
Release lock.
#define __TBB_ASSERT_EX(predicate, comment)
"Extended" version is useful to suppress warnings if a variable is only used with an assert
int my_total_demand
Number of workers that were requested by all arenas.
static global_market_mutex_type theMarketMutex
Mutex guarding creation/destruction of theMarket, and insertions/deletions in my_arenas.
void __TBB_EXPORTED_FUNC runtime_warning(const char *format,...)
Report a runtime warning.
unsigned my_ref_count
Reference count controlling market object lifetime.
void const char const char int ITT_FORMAT __itt_group_sync s
void update_allotment()
Recalculates the number of workers assigned to each arena in the list.
static bool UsePrivateRML
static arena * create_arena(int num_slots, int num_reserved_slots, size_t stack_size)
Creates an arena object.
static arena & allocate_arena(market &, unsigned num_slots, unsigned num_reserved_slots)
Allocate an instance of arena.
#define __TBB_TASK_PRIORITY
static generic_scheduler * create_worker(market &m, size_t index)
Initialize a scheduler for a worker thread.
void insert_arena_into_list(arena &a)
arena * arena_in_need(arena *)
Returns next arena that needs more workers, or NULL.
arenas_list_mutex_type my_arenas_list_mutex
#define ITT_THREAD_SET_NAME(name)
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
void cleanup(job &j) __TBB_override
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
Work stealing task scheduler.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
uintptr_t my_arenas_aba_epoch
ABA prevention marker to assign to newly created arenas.
static void cleanup_worker(void *arg, bool worker)
Perform necessary cleanup when a worker thread finishes.
The scoped locking pattern.
static size_t active_value(parameter p)
tbb::atomic< uintptr_t > my_pool_state
Current task pool state and estimate of available tasks amount.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t size
unsigned my_workers_soft_limit_to_report
Either workers soft limit to be reported via runtime_warning() or skip_soft_limit_warning.
static unsigned calc_workers_soft_limit(unsigned workers_soft_limit, unsigned workers_hard_limit)
int my_num_workers_requested
The number of workers that are currently requested from the resource manager.
unsigned num_workers_active()
The number of workers active in the arena.
bool my_join_workers
Shutdown mode.
#define GATHER_STATISTIC(x)
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
atomic< unsigned > my_references
Reference counter for the arena.
T min(const T &val1, const T &val2)
Utility template function returning lesser of the two values.
static const unsigned ref_worker
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
int my_num_workers_requested
Number of workers currently requested from RML.
static unsigned app_parallelism_limit()
Reports active parallelism level according to user's settings.
uintptr_t my_aba_epoch
ABA prevention marker.
static unsigned default_num_threads()
job * create_one_job() __TBB_override
void detach_arena(arena &)
Removes the arena from the market's list.
void process(generic_scheduler &)
Registers the worker with the arena and enters TBB scheduler dispatch loop.
static bool does_client_join_workers(const tbb::internal::rml::tbb_client &client)
#define _T(string_literal)
Standard Windows style macro to markup the string literals.
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
bool is_worker() const
True if running on a worker thread, false otherwise.
atomic< unsigned > my_first_unused_worker_idx
First unused index of worker.
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
void assert_market_valid() const
static void add_ref()
Add reference to resources. If first reference added, acquire the resources.
void destroy()
Destroys and deallocates market object created by market::create()
unsigned my_num_workers_hard_limit
Maximal number of workers allowed for use by the underlying resource manager.
void adjust_demand(arena &, int delta)
Request that arena's need in workers should be adjusted.
T max(const T &val1, const T &val2)
Utility template function returning greater of the two values.
atomic< T > & as_atomic(T &t)
size_t my_stack_size
Stack size of worker threads.
arena * my_next_arena
The first arena to be checked when idle worker seeks for an arena to enter.
static const unsigned skip_soft_limit_warning
The value indicating that the soft limit warning is unnecessary.
static const pool_state_t SNAPSHOT_EMPTY
No tasks to steal since last snapshot was taken.
void lock()
Acquire writer lock.
market(unsigned workers_soft_limit, unsigned workers_hard_limit, size_t stack_size)
Constructor.
void const char const char int ITT_FORMAT __itt_group_sync p
unsigned my_num_workers_allotted
The number of workers that have been marked out by the resource manager to service the arena.
void free_arena()
Completes arena shutdown, destructs and deallocates it.
unsigned my_num_workers_soft_limit
Current application-imposed limit on the number of workers (see set_active_num_workers())
static void set_active_num_workers(unsigned w)
Set number of active workers.
void acknowledge_close_connection() __TBB_override
market * my_market
The market that owns this arena.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void * lock
bool release(bool is_public, bool blocking_terminate)
Decrements market's refcount and destroys it in the end.
T __TBB_load_with_acquire(const volatile T &location)
rml::tbb_server * my_server
Pointer to the RML server object that services this TBB instance.
void remove_arena_from_list(arena &a)
arena_list_type my_arenas
List of registered arenas.
static generic_scheduler * local_scheduler_if_initialized()