Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
market.h
1/*
2 Copyright (c) 2005-2020 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15*/
16
17#ifndef _TBB_market_H
18#define _TBB_market_H
19
20#include "tbb/tbb_stddef.h"
21
22#include "scheduler_common.h"
23#include "tbb/atomic.h"
24#include "tbb/spin_rw_mutex.h"
25#include "../rml/include/rml_tbb.h"
26
27#include "intrusive_list.h"
28
29#if defined(_MSC_VER) && defined(_Wp64)
30 // Workaround for overzealous compiler warnings in /Wp64 mode
31 #pragma warning (push)
32 #pragma warning (disable: 4244)
33#endif
34
35namespace tbb {
36
37class task_group_context;
38
39namespace internal {
40
41//------------------------------------------------------------------------
42// Class market
43//------------------------------------------------------------------------
44
45class market : no_copy, rml::tbb_client {
46 friend class generic_scheduler;
47 friend class arena;
49 template<typename SchedulerTraits> friend class custom_scheduler;
50 friend class tbb::task_group_context;
51private:
52 friend void ITT_DoUnsafeOneTimeInitialization ();
53
54 typedef intrusive_list<arena> arena_list_type;
55 typedef intrusive_list<generic_scheduler> scheduler_list_type;
56
57 //! Currently active global market
58 static market* theMarket;
59
60 typedef scheduler_mutex_type global_market_mutex_type;
61
62 //! Mutex guarding creation/destruction of theMarket and insertions/deletions in my_arenas
63 static global_market_mutex_type theMarketMutex;
64
65 //! Lightweight mutex guarding accounting operations with arenas list
66 typedef spin_rw_mutex arenas_list_mutex_type;
67 arenas_list_mutex_type my_arenas_list_mutex;
68
69 //! Pointer to the RML server object that services this TBB instance.
70 rml::tbb_server* my_server;
71
73 //! Maximal number of workers allowed for use by the underlying resource manager
74 unsigned my_num_workers_hard_limit;
75
77 //! Current application-imposed limit on the number of workers (see set_active_num_workers())
78 unsigned my_num_workers_soft_limit;
79
80 //! Number of workers currently requested from RML
81 int my_num_workers_requested;
82
84 //! First unused index of worker
86 atomic<unsigned> my_first_unused_worker_idx;
87
88 //! Number of workers that were requested by all arenas
89 int my_total_demand;
90
91#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
93 int my_mandatory_num_requested;
94#endif
95
96#if __TBB_TASK_PRIORITY
98
102 intptr_t my_global_top_priority;
103
105
106 intptr_t my_global_bottom_priority;
107
109
111 uintptr_t my_global_reload_epoch;
112
114 struct priority_level_info {
116 arena_list_type arenas;
117
119
120 arena *next_arena;
121
123 int workers_requested;
124
126 int workers_available;
127 }; // struct priority_level_info
128
130 priority_level_info my_priority_levels[num_priority_levels];
131
132#else /* !__TBB_TASK_PRIORITY */
133
134 //! List of registered arenas
135 arena_list_type my_arenas;
136
138 //! The first arena to be checked when idle worker seeks for an arena to enter
139 arena *my_next_arena;
140#endif /* !__TBB_TASK_PRIORITY */
141
142 //! ABA prevention marker to assign to newly created arenas
143 uintptr_t my_arenas_aba_epoch;
144
145 //! Reference count controlling market object lifetime
146 unsigned my_ref_count;
147
148 //! Count of master threads attached
149 unsigned my_public_ref_count;
150
151 //! Stack size of worker threads
152 size_t my_stack_size;
153
154 //! Shutdown mode
155 bool my_join_workers;
156
157 //! The value indicating that the soft limit warning is unnecessary
158 static const unsigned skip_soft_limit_warning = ~0U;
159
160 //! Either workers soft limit to be reported via runtime_warning() or skip_soft_limit_warning
161 unsigned my_workers_soft_limit_to_report;
162#if __TBB_COUNT_TASK_NODES
164
165 atomic<intptr_t> my_task_node_count;
166#endif /* __TBB_COUNT_TASK_NODES */
167
168 //! Constructor
169 market ( unsigned workers_soft_limit, unsigned workers_hard_limit, size_t stack_size );
170
171 //! Factory method creating new market object
172 static market& global_market ( bool is_public, unsigned max_num_workers = 0, size_t stack_size = 0 );
173
174 //! Destroys and deallocates market object created by market::create()
175 void destroy ();
176
177 //! Recalculates the number of workers requested from RML and updates the allotment.
178 int update_workers_request();
179
180#if __TBB_TASK_PRIORITY
182 arena* arena_in_need ( arena* prev_arena );
183
185
187 void update_allotment ( intptr_t highest_affected_priority );
188
190 void update_arena_top_priority ( arena& a, intptr_t newPriority );
191
193 inline void update_global_top_priority ( intptr_t newPriority );
194
196 inline void reset_global_priority ();
197
198 inline void advance_global_reload_epoch () {
199 __TBB_store_with_release( my_global_reload_epoch, my_global_reload_epoch + 1 );
200 }
201
202 void assert_market_valid () const {
203 __TBB_ASSERT( (my_priority_levels[my_global_top_priority].workers_requested > 0
204 && !my_priority_levels[my_global_top_priority].arenas.empty())
205 || (my_global_top_priority == my_global_bottom_priority &&
206 my_global_top_priority == normalized_normal_priority), NULL );
207 }
208
209#else /* !__TBB_TASK_PRIORITY */
210
211 //! Recalculates the number of workers assigned to each arena in the list.
214 void update_allotment (unsigned effective_soft_limit) {
215 if ( my_total_demand )
216 update_allotment( my_arenas, my_total_demand, (int)effective_soft_limit );
217 }
218
219 // TODO: consider to rewrite the code with is_arena_in_list function
220 //! Returns next arena that needs more workers, or NULL.
221 arena* arena_in_need (arena* prev_arena) {
222 if ( __TBB_load_with_acquire(my_total_demand) <= 0 )
223 return NULL;
224 arenas_list_mutex_type::scoped_lock lock(my_arenas_list_mutex, /*is_writer=*/false);
225 arena* a = NULL;
226
227 // Checks if arena is alive or not
228 if ( is_arena_in_list( my_arenas, prev_arena ) ) {
229 a = arena_in_need( my_arenas, prev_arena );
230 } else {
231 a = arena_in_need( my_arenas, my_next_arena );
232 if (a) {
233 as_atomic(my_next_arena) = a; // a subject for innocent data race under the reader lock
234 // TODO: rework global round robin policy to local or random to avoid this write
235 }
236 }
237
238 return a;
239 }
240
241 void assert_market_valid () const {}
242#endif /* !__TBB_TASK_PRIORITY */
243
245 // Helpers to unify code branches dependent on priority feature presence
246
247 void insert_arena_into_list ( arena& a );
248
249 void remove_arena_from_list ( arena& a );
250
251 arena* arena_in_need ( arena_list_type &arenas, arena *hint );
252
253 int update_allotment ( arena_list_type& arenas, int total_demand, int max_workers );
254
255 bool is_arena_in_list( arena_list_type &arenas, arena *a );
256
257
259 // Implementation of rml::tbb_client interface methods
260
261 version_type version () const __TBB_override { return 0; }
262
263 unsigned max_job_count () const __TBB_override { return my_num_workers_hard_limit; }
264
265 size_t min_stack_size () const __TBB_override { return worker_stack_size(); }
266
267 policy_type policy () const __TBB_override { return throughput; }
268
269 job* create_one_job () __TBB_override;
270
271 void cleanup( job& j ) __TBB_override;
272
273 void acknowledge_close_connection () __TBB_override;
274
275 void process( job& j ) __TBB_override;
276
277public:
278 //! Creates an arena object
279
281 static arena* create_arena ( int num_slots, int num_reserved_slots, size_t stack_size );
282
283 //! Removes the arena from the market's list.
284 void try_destroy_arena ( arena*, uintptr_t aba_epoch );
285
286 //! Removes the arena from the market's list.
287 void detach_arena ( arena& );
288
290 bool release ( bool is_public, bool blocking_terminate );
291
292#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
294 void enable_mandatory_concurrency_impl ( arena *a );
295
297 void enable_mandatory_concurrency ( arena *a );
298
300 void disable_mandatory_concurrency_impl(arena* a);
301
303 void mandatory_concurrency_disable ( arena *a );
304#endif /* __TBB_ENQUEUE_ENFORCED_CONCURRENCY */
305
306 //! Request that arena's need in workers should be adjusted.
307
308 void adjust_demand ( arena&, int delta );
309
310 //! Used when RML asks for join mode during workers termination.
311 bool must_join_workers () const { return my_join_workers; }
312
313 //! Returns the requested stack size of worker threads.
314 size_t worker_stack_size () const { return my_stack_size; }
315
316 //! Set number of active workers.
317 static void set_active_num_workers( unsigned w );
318
319 //! Reports active parallelism level according to user's settings.
320 static unsigned app_parallelism_limit();
321
322#if _WIN32||_WIN64
324 void register_master( ::rml::server::execution_resource_t& rsc_handle ) {
325 __TBB_ASSERT( my_server, "RML server not defined?" );
326 // the server may ignore registration and set master_exec_resource to NULL.
327 my_server->register_master( rsc_handle );
328 }
329
331 void unregister_master( ::rml::server::execution_resource_t& rsc_handle ) const {
332 my_server->unregister_master( rsc_handle );
333 }
334#endif /* WIN */
335
336#if __TBB_TASK_GROUP_CONTEXT
338
342 template <typename T>
343 bool propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );
344#endif /* __TBB_TASK_GROUP_CONTEXT */
345
346#if __TBB_TASK_PRIORITY
348
349 bool lower_arena_priority ( arena& a, intptr_t new_priority, uintptr_t old_reload_epoch );
350
352
358 bool update_arena_priority ( arena& a, intptr_t new_priority );
359#endif /* __TBB_TASK_PRIORITY */
360
361#if __TBB_COUNT_TASK_NODES
363
364 void update_task_node_count( intptr_t delta ) { my_task_node_count += delta; }
365#endif /* __TBB_COUNT_TASK_NODES */
366
367#if __TBB_TASK_GROUP_CONTEXT
369 scheduler_list_type my_masters;
370
372
374 generic_scheduler* my_workers[1];
375#endif /* __TBB_TASK_GROUP_CONTEXT */
376
377 static unsigned max_num_workers() {
378 global_market_mutex_type::scoped_lock lock( theMarketMutex );
379 return theMarket? theMarket->my_num_workers_hard_limit : 0;
380 }
381}; // class market
382
383} // namespace internal
384} // namespace tbb
385
386#if defined(_MSC_VER) && defined(_Wp64)
387 // Workaround for overzealous compiler warnings in /Wp64 mode
388 #pragma warning (pop)
389#endif // warning 4244 is back
390
391#endif /* _TBB_market_H */
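A note on the allotment machinery declared above: the market sums the requests of all arenas in my_total_demand and then splits at most my_num_workers_soft_limit workers across the arenas in proportion to their demand via update_allotment(arena_list_type&, int, int). The snippet below is not the TBB implementation, only a minimal self-contained sketch of that proportional split; the DemandInfo struct, the function shape, and the carry-based rounding are assumptions made for the example.

// Simplified illustration of proportional worker allotment (not the TBB code).
#include <algorithm>
#include <cstdio>
#include <vector>

struct DemandInfo {      // hypothetical stand-in for an arena's bookkeeping
    int demand;          // workers this arena asked for
    int allotment;       // workers the market grants to it
};

// Split max_workers across arenas proportionally to their demand, never giving an
// arena more than it asked for. The carry keeps rounding remainders from being lost.
// Caller guarantees total_demand > 0, mirroring the update_allotment(unsigned) wrapper above.
int update_allotment(std::vector<DemandInfo>& arenas, int total_demand, int max_workers) {
    int unassigned = std::min(total_demand, max_workers);
    int assigned = 0;
    int carry = 0;
    for (DemandInfo& a : arenas) {
        if (a.demand <= 0) { a.allotment = 0; continue; }
        int tmp = a.demand * unassigned + carry;
        carry = tmp % total_demand;
        a.allotment = std::min(tmp / total_demand, a.demand);
        assigned += a.allotment;
    }
    return assigned;     // total number of workers actually promised to arenas
}

int main() {
    std::vector<DemandInfo> arenas = { {4, 0}, {1, 0}, {3, 0} };
    int granted = update_allotment(arenas, /*total_demand=*/8, /*max_workers=*/4);
    for (const DemandInfo& a : arenas)
        std::printf("demand=%d allotted=%d\n", a.demand, a.allotment);
    std::printf("granted=%d\n", granted);   // prints granted=4
}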
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition tbb_stddef.h:165
#define __TBB_override
Definition tbb_stddef.h:240
T __TBB_load_with_acquire(const volatile T &location)
__TBB_SCHEDULER_MUTEX_TYPE scheduler_mutex_type
Mutex type for global locks in the scheduler.
atomic< T > & as_atomic(T &t)
Definition atomic.h:572
static const intptr_t num_priority_levels
void __TBB_store_with_release(volatile T &location, V value)
Fast, unfair, spinning reader-writer lock with backoff and writer-preference.
The scoped locking pattern.
Used to form groups of tasks.
Definition task.h:358
Base class for types that should not be copied or assigned.
Definition tbb_stddef.h:330
A scheduler with a customized evaluation loop.
Double linked list of items of type T that is derived from intrusive_list_node class.
uintptr_t my_arenas_aba_epoch
ABA prevention marker to assign to newly created arenas.
Definition market.h:143
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
Definition market.cpp:96
unsigned my_num_workers_hard_limit
Maximal number of workers allowed for use by the underlying resource manager.
Definition market.h:74
unsigned max_job_count() const __TBB_override
Definition market.h:263
static market * theMarket
Currently active global market.
Definition market.h:58
intrusive_list< arena > arena_list_type
Definition market.h:54
void adjust_demand(arena &, int delta)
Request that arena's need in workers should be adjusted (see the demand-adjustment sketch below).
Definition market.cpp:557
atomic< unsigned > my_first_unused_worker_idx
First unused index of worker.
Definition market.h:86
bool my_join_workers
Shutdown mode.
Definition market.h:155
void cleanup(job &j) __TBB_override
Definition market.cpp:681
unsigned my_public_ref_count
Count of master threads attached.
Definition market.h:149
version_type version() const __TBB_override
Definition market.h:261
static unsigned max_num_workers()
Definition market.h:377
static const unsigned skip_soft_limit_warning
The value indicating that the soft limit warning is unnecessary.
Definition market.h:158
static void set_active_num_workers(unsigned w)
Set number of active workers.
Definition market.cpp:235
void process(job &j) __TBB_override
Definition market.cpp:659
bool must_join_workers() const
Used when RML asks for join mode during workers termination.
Definition market.h:311
void detach_arena(arena &)
Removes the arena from the market's list.
Definition market.cpp:322
void destroy()
Destroys and deallocates market object created by market::create()
Definition market.cpp:165
int update_workers_request()
Recalculates the number of workers requested from RML and updates the allotment.
Definition market.cpp:217
void try_destroy_arena(arena *, uintptr_t aba_epoch)
Removes the arena from the market's list.
Definition market.cpp:333
size_t min_stack_size() const __TBB_override
Definition market.h:265
void insert_arena_into_list(arena &a)
Definition market.cpp:29
friend void ITT_DoUnsafeOneTimeInitialization()
static unsigned app_parallelism_limit()
Reports active parallelism level according to user's settings.
Definition tbb_main.cpp:512
int my_num_workers_requested
Number of workers currently requested from RML.
Definition market.h:81
arenas_list_mutex_type my_arenas_list_mutex
Definition market.h:67
friend class generic_scheduler
Definition market.h:46
spin_rw_mutex arenas_list_mutex_type
Lightweight mutex guarding accounting operations with arenas list.
Definition market.h:66
void remove_arena_from_list(arena &a)
Definition market.cpp:42
static arena * create_arena(int num_slots, int num_reserved_slots, size_t stack_size)
Creates an arena object.
Definition market.cpp:308
arena_list_type my_arenas
List of registered arenas.
Definition market.h:135
intrusive_list< generic_scheduler > scheduler_list_type
Definition market.h:55
bool is_arena_in_list(arena_list_type &arenas, arena *a)
Definition market.cpp:424
friend class tbb::task_group_context
Definition market.h:50
void assert_market_valid() const
Definition market.h:241
void acknowledge_close_connection() __TBB_override
Definition market.cpp:695
unsigned my_workers_soft_limit_to_report
Either workers soft limit to be reported via runtime_warning() or skip_soft_limit_warning.
Definition market.h:161
policy_type policy() const __TBB_override
Definition market.h:267
int my_total_demand
Number of workers that were requested by all arenas.
Definition market.h:89
unsigned my_ref_count
Reference count controlling market object lifetime.
Definition market.h:146
size_t worker_stack_size() const
Returns the requested stack size of worker threads.
Definition market.h:314
size_t my_stack_size
Stack size of worker threads.
Definition market.h:152
arena * arena_in_need(arena *prev_arena)
Returns next arena that needs more workers, or NULL.
Definition market.h:221
friend class arena
Definition market.h:47
void update_allotment(unsigned effective_soft_limit)
Recalculates the number of workers assigned to each arena in the list.
Definition market.h:214
static global_market_mutex_type theMarketMutex
Mutex guarding creation/destruction of theMarket, insertions/deletions in my_arenas,...
Definition market.h:63
job * create_one_job() __TBB_override
Definition market.cpp:699
unsigned my_num_workers_soft_limit
Current application-imposed limit on the number of workers (see set_active_num_workers())
Definition market.h:78
scheduler_mutex_type global_market_mutex_type
Definition market.h:60
rml::tbb_server * my_server
Pointer to the RML server object that services this TBB instance.
Definition market.h:70
arena * my_next_arena
The first arena to be checked when idle worker seeks for an arena to enter.
Definition market.h:139
Work stealing task scheduler.
Definition scheduler.h:140
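To complement the member descriptions above, here is a small self-contained model of the demand-adjustment flow described for adjust_demand() and my_total_demand: an arena raises or lowers its request, the market folds the change into the global demand, clamps it to the soft limit, and asks the resource manager for more or fewer workers. Everything below (Market, MockServer, Arena, adjust_worker_request) is invented for illustration; in the real code the server side is rml::tbb_server and the per-arena redistribution is done by update_allotment().

// Simplified model of market::adjust_demand() (not the TBB code).
#include <algorithm>
#include <cstdio>
#include <mutex>

struct Arena { int demand = 0; };    // invented stand-in for tbb::internal::arena

// Invented stand-in for the RML server: it only records how many workers
// have been requested so far (cf. rml::tbb_server declared in rml_tbb.h).
struct MockServer {
    int requested = 0;
    void adjust_worker_request(int delta) {
        requested += delta;
        std::printf("server: %+d -> %d workers\n", delta, requested);
    }
};

class Market {                        // sketch of the market's bookkeeping only
    std::mutex arenas_mutex;          // cf. my_arenas_list_mutex
    int total_demand = 0;             // cf. my_total_demand
    int soft_limit;                   // cf. my_num_workers_soft_limit
    int workers_requested = 0;        // cf. my_num_workers_requested
    MockServer& server;
public:
    Market(MockServer& s, int limit) : soft_limit(limit), server(s) {}

    // cf. market::adjust_demand(arena&, int delta): update the arena's and the
    // global demand, clamp to the soft limit, and forward the change to the server.
    void adjust_demand(Arena& a, int delta) {
        std::lock_guard<std::mutex> lock(arenas_mutex);
        a.demand += delta;
        total_demand += delta;
        int target = std::min(total_demand, soft_limit);
        if (target != workers_requested) {
            server.adjust_worker_request(target - workers_requested);
            workers_requested = target;
        }
        // a real market would also re-run the allotment over its arena list here
    }
};

int main() {
    MockServer server;
    Market market(server, /*soft limit*/ 4);
    Arena a, b;
    market.adjust_demand(a, 3);   // server asked for 3 workers
    market.adjust_demand(b, 5);   // total demand 8, clamped: only 1 more worker
    market.adjust_demand(b, -5);  // demand falls below the limit; one worker released
}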

Copyright © 2005-2020 Intel Corporation. All Rights Reserved.
