Intel(R) Threading Building Blocks Doxygen Documentation, version 4.2.3
governor.cpp
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 
16 
17 
18 
19 */
20 
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include "governor.h"
24 #include "tbb_main.h"
25 #include "scheduler.h"
26 #include "market.h"
27 #include "arena.h"
28 
29 #include "tbb/task_scheduler_init.h"
30 
31 #include "dynamic_link.h"
32 
33 namespace tbb {
34 namespace internal {
35 
36 //------------------------------------------------------------------------
37 // governor
38 //------------------------------------------------------------------------
39 
40 #if __TBB_SURVIVE_THREAD_SWITCH
41 // Support for interoperability with Intel(R) Cilk(TM) Plus.
42 
43 #if _WIN32
44 #define CILKLIB_NAME "cilkrts20.dll"
45 #else
46 #define CILKLIB_NAME "libcilkrts.so"
47 #endif
48 
49 //! Handler for interoperation with cilkrts library.
50 static __cilk_tbb_retcode (*watch_stack_handler)(struct __cilk_tbb_unwatch_thunk* u,
51  struct __cilk_tbb_stack_op_thunk o);
52 
53 //! Table describing how to link the handlers.
54 static const dynamic_link_descriptor CilkLinkTable[] = {
55  DLD_NOWEAK(__cilkrts_watch_stack, watch_stack_handler)
56 };
57 
58 static atomic<do_once_state> cilkrts_load_state;
59 
60 bool initialize_cilk_interop() {
61  // Pinning can fail. This is a normal situation, and means that the current
62  // thread does not use cilkrts and consequently does not need interop.
63  return dynamic_link( CILKLIB_NAME, CilkLinkTable, 1, /*handle=*/0, DYNAMIC_LINK_GLOBAL );
64 }
65 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
66 
67 namespace rml {
68  tbb_server* make_private_server( tbb_client& client );
69 }
70 
71 void governor::acquire_resources () {
72 #if USE_PTHREAD
73  int status = theTLS.create(auto_terminate);
74 #else
75  int status = theTLS.create();
76 #endif
77  if( status )
78  handle_perror(status, "TBB failed to initialize task scheduler TLS\n");
79  is_speculation_enabled = cpu_has_speculation();
80  is_rethrow_broken = gcc_rethrow_exception_broken();
81 }
82 
83 void governor::release_resources () {
84  theRMLServerFactory.close();
85  destroy_process_mask();
86 #if TBB_USE_ASSERT
87  if( __TBB_InitOnce::initialization_done() && local_scheduler_if_initialized() )
88  runtime_warning( "TBB is unloaded while tbb::task_scheduler_init object is alive?" );
89 #endif
90  int status = theTLS.destroy();
91  if( status )
92  runtime_warning("failed to destroy task scheduler TLS: %s", strerror(status));
93  dynamic_unlink_all();
94 }
95 
96 rml::tbb_server* governor::create_rml_server ( rml::tbb_client& client ) {
97  rml::tbb_server* server = NULL;
98  if( !UsePrivateRML ) {
99  ::rml::factory::status_type status = theRMLServerFactory.make_server( server, client );
100  if( status != ::rml::factory::st_success ) {
101  UsePrivateRML = true;
102  runtime_warning( "rml::tbb_factory::make_server failed with status %x, falling back on private rml", status );
103  }
104  }
105  if ( !server ) {
106  __TBB_ASSERT( UsePrivateRML, NULL );
107  server = rml::make_private_server( client );
108  }
109  __TBB_ASSERT( server, "Failed to create RML server" );
110  return server;
111 }
112 
113 
114 uintptr_t governor::tls_value_of( generic_scheduler* s ) {
115  __TBB_ASSERT( (uintptr_t(s)&1) == 0, "Bad pointer to the scheduler" );
116  // LSB marks the scheduler initialized with arena
117  return uintptr_t(s) | uintptr_t((s && (s->my_arena || s->is_worker()))? 1 : 0);
118 }
119 
120 void governor::assume_scheduler( generic_scheduler* s ) {
121  theTLS.set( tls_value_of(s) );
122 }
123 
124 bool governor::is_set( generic_scheduler* s ) {
125  return theTLS.get() == tls_value_of(s);
126 }
127 
128 void governor::sign_on(generic_scheduler* s) {
129  __TBB_ASSERT( is_set(NULL) && s, NULL );
130  assume_scheduler( s );
131 #if __TBB_SURVIVE_THREAD_SWITCH
132  if( watch_stack_handler ) {
133  __cilk_tbb_stack_op_thunk o;
134  o.routine = &stack_op_handler;
135  o.data = s;
136  if( (*watch_stack_handler)(&s->my_cilk_unwatch_thunk, o) ) {
137  // Failed to register with cilkrts, make sure we are clean
138  s->my_cilk_unwatch_thunk.routine = NULL;
139  }
140 #if TBB_USE_ASSERT
141  else
142  s->my_cilk_state = generic_scheduler::cs_running;
143 #endif /* TBB_USE_ASSERT */
144  }
145 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
146  __TBB_ASSERT( is_set(s), NULL );
147 }
148 
149 void governor::sign_off(generic_scheduler* s) {
150  suppress_unused_warning(s);
151  __TBB_ASSERT( is_set(s), "attempt to unregister a wrong scheduler instance" );
152  assume_scheduler(NULL);
153 #if __TBB_SURVIVE_THREAD_SWITCH
154  __cilk_tbb_unwatch_thunk &ut = s->my_cilk_unwatch_thunk;
155  if ( ut.routine )
156  (*ut.routine)(ut.data);
157 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
158 }
159 
160 void governor::one_time_init() {
161  if( !__TBB_InitOnce::initialization_done() )
162  DoOneTimeInitializations();
163 #if __TBB_SURVIVE_THREAD_SWITCH
164  atomic_do_once( &initialize_cilk_interop, cilkrts_load_state );
165 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
166 }
167 
168 generic_scheduler* governor::init_scheduler_weak() {
169  one_time_init();
170  __TBB_ASSERT( is_set(NULL), "TLS contains a scheduler?" );
171  generic_scheduler* s = generic_scheduler::create_master( NULL ); // without arena
172  s->my_auto_initialized = true;
173  return s;
174 }
175 
176 generic_scheduler* governor::init_scheduler( int num_threads, stack_size_type stack_size, bool auto_init ) {
177  one_time_init();
178  if ( uintptr_t v = theTLS.get() ) {
179  generic_scheduler* s = tls_scheduler_of( v );
180  if ( (v&1) == 0 ) { // TLS holds scheduler instance without arena
181  __TBB_ASSERT( s->my_ref_count == 1, "weakly initialized scheduler must have refcount equal to 1" );
182  __TBB_ASSERT( !s->my_arena, "weakly initialized scheduler must have no arena" );
183  __TBB_ASSERT( s->my_auto_initialized, "weakly initialized scheduler is supposed to be auto-initialized" );
184  s->attach_arena( market::create_arena( default_num_threads(), 1, 0 ), 0, /*is_master*/true );
185  __TBB_ASSERT( s->my_arena_index == 0, "Master thread must occupy the first slot in its arena" );
186  s->my_arena_slot->my_scheduler = s;
187  s->my_arena->my_default_ctx = s->default_context(); // it also transfers implied ownership
188  // Mark the scheduler as fully initialized
189  assume_scheduler( s );
190  }
191  // Increment refcount only for explicit instances of task_scheduler_init.
192  if ( !auto_init ) s->my_ref_count += 1;
193  __TBB_ASSERT( s->my_arena, "scheduler is not initialized fully" );
194  return s;
195  }
196  // Create new scheduler instance with arena
197  if( num_threads == task_scheduler_init::automatic )
198  num_threads = default_num_threads();
199  arena *a = market::create_arena( num_threads, 1, stack_size );
200  generic_scheduler* s = generic_scheduler::create_master( a );
201  __TBB_ASSERT(s, "Somehow a local scheduler creation for a master thread failed");
202  __TBB_ASSERT( is_set(s), NULL );
203  s->my_auto_initialized = auto_init;
204  return s;
205 }
206 
207 bool governor::terminate_scheduler( generic_scheduler* s, bool blocking ) {
208  bool ok = false;
209  __TBB_ASSERT( is_set(s), "Attempt to terminate non-local scheduler instance" );
210  if (0 == --(s->my_ref_count)) {
211  ok = s->cleanup_master( blocking );
212  __TBB_ASSERT( is_set(NULL), "cleanup_master has not cleared its TLS slot" );
213  }
214  return ok;
215 }
216 
217 void governor::auto_terminate(void* arg){
218  generic_scheduler* s = tls_scheduler_of( uintptr_t(arg) ); // arg is equivalent to theTLS.get()
219  if( s && s->my_auto_initialized ) {
220  if( !--(s->my_ref_count) ) {
221  // If the TLS slot is already cleared by OS or underlying concurrency
222  // runtime, restore its value.
223  if( !is_set(s) )
224  assume_scheduler(s);
225  s->cleanup_master( /*blocking_terminate=*/false );
226  __TBB_ASSERT( is_set(NULL), "cleanup_master has not cleared its TLS slot" );
227  }
228  }
229 }
230 
231 void governor::print_version_info () {
232  if ( UsePrivateRML )
233  PrintExtraVersionInfo( "RML", "private" );
234  else {
235  PrintExtraVersionInfo( "RML", "shared" );
236  theRMLServerFactory.call_with_server_info( PrintRMLVersionInfo, (void*)"" );
237  }
238 #if __TBB_SURVIVE_THREAD_SWITCH
239  if( watch_stack_handler )
240  PrintExtraVersionInfo( "CILK", CILKLIB_NAME );
241 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
242 }
243 
244 void governor::initialize_rml_factory () {
245  ::rml::factory::status_type res = theRMLServerFactory.open();
246  UsePrivateRML = res != ::rml::factory::st_success;
247 }
248 
249 #if __TBB_SURVIVE_THREAD_SWITCH
250 __cilk_tbb_retcode governor::stack_op_handler( __cilk_tbb_stack_op op, void* data ) {
251  __TBB_ASSERT(data,NULL);
252  generic_scheduler* s = static_cast<generic_scheduler*>(data);
253 #if TBB_USE_ASSERT
254  void* current = local_scheduler_if_initialized();
255 #if _WIN32||_WIN64
256  uintptr_t thread_id = GetCurrentThreadId();
257 #else
258  uintptr_t thread_id = uintptr_t(pthread_self());
259 #endif
260 #endif /* TBB_USE_ASSERT */
261  switch( op ) {
262  case CILK_TBB_STACK_ADOPT: {
263  __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo ||
264  current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid adoption" );
265 #if TBB_USE_ASSERT
266  if( current==s )
267  runtime_warning( "redundant adoption of %p by thread %p\n", s, (void*)thread_id );
268  s->my_cilk_state = generic_scheduler::cs_running;
269 #endif /* TBB_USE_ASSERT */
270  assume_scheduler( s );
271  break;
272  }
273  case CILK_TBB_STACK_ORPHAN: {
274  __TBB_ASSERT( current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid orphaning" );
275 #if TBB_USE_ASSERT
276  s->my_cilk_state = generic_scheduler::cs_limbo;
277 #endif /* TBB_USE_ASSERT */
278  assume_scheduler(NULL);
279  break;
280  }
281  case CILK_TBB_STACK_RELEASE: {
282  __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo ||
283  current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid release" );
284 #if TBB_USE_ASSERT
285  s->my_cilk_state = generic_scheduler::cs_freed;
286 #endif /* TBB_USE_ASSERT */
287  s->my_cilk_unwatch_thunk.routine = NULL;
288  auto_terminate( s );
289  break;
290  }
291  default:
292  __TBB_ASSERT(0, "invalid op");
293  }
294  return 0;
295 }
296 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
297 
298 } // namespace internal
299 
300 //------------------------------------------------------------------------
301 // task_scheduler_init
302 //------------------------------------------------------------------------
303 
304 using namespace internal;
305 
306 /** Left out-of-line for the sake of the backward binary compatibility **/
307 void task_scheduler_init::initialize( int number_of_threads ) {
308  initialize( number_of_threads, 0 );
309 }
310 
311 void task_scheduler_init::initialize( int number_of_threads, stack_size_type thread_stack_size ) {
312 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
313  uintptr_t new_mode = thread_stack_size & propagation_mode_mask;
314 #endif
315  thread_stack_size &= ~(stack_size_type)propagation_mode_mask;
316  if( number_of_threads!=deferred ) {
317  __TBB_ASSERT_RELEASE( !my_scheduler, "task_scheduler_init already initialized" );
318  __TBB_ASSERT_RELEASE( number_of_threads==automatic || number_of_threads > 0,
319  "number_of_threads for task_scheduler_init must be automatic or positive" );
320  internal::generic_scheduler *s = governor::init_scheduler( number_of_threads, thread_stack_size, /*auto_init=*/false );
321 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
322  if ( s->master_outermost_level() ) {
323  uintptr_t &vt = s->default_context()->my_version_and_traits;
324  uintptr_t prev_mode = vt & task_group_context::exact_exception ? propagation_mode_exact : 0;
325  vt = new_mode & propagation_mode_exact ? vt | task_group_context::exact_exception
326  : new_mode & propagation_mode_captured ? vt & ~task_group_context::exact_exception : vt;
327  // Use least significant bit of the scheduler pointer to store previous mode.
328  // This is necessary when components compiled with different compilers and/or
329  // TBB versions initialize the same task scheduler.
330  my_scheduler = static_cast<scheduler*>((generic_scheduler*)((uintptr_t)s | prev_mode));
331  }
332  else
333 #endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
334  my_scheduler = s;
335  } else {
336  __TBB_ASSERT_RELEASE( !thread_stack_size, "deferred initialization ignores stack size setting" );
337  }
338 }
339 
340 bool task_scheduler_init::internal_terminate( bool blocking ) {
341 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
342  uintptr_t prev_mode = (uintptr_t)my_scheduler & propagation_mode_exact;
343  my_scheduler = (scheduler*)((uintptr_t)my_scheduler & ~(uintptr_t)propagation_mode_exact);
344 #endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
345  generic_scheduler* s = static_cast<generic_scheduler*>(my_scheduler);
346  my_scheduler = NULL;
347  __TBB_ASSERT_RELEASE( s, "task_scheduler_init::terminate without corresponding task_scheduler_init::initialize()");
348 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
349  if ( s->master_outermost_level() ) {
350  uintptr_t &vt = s->default_context()->my_version_and_traits;
351  vt = prev_mode & propagation_mode_exact ? vt | task_group_context::exact_exception
352  : vt & ~task_group_context::exact_exception;
353  }
354 #endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
355  return governor::terminate_scheduler(s, blocking);
356 }
357 
358 void task_scheduler_init::terminate() {
359  internal_terminate(/*blocking_terminate=*/false);
360 }
361 
362 #if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
363 bool task_scheduler_init::internal_blocking_terminate( bool throwing ) {
364  bool ok = internal_terminate( /*blocking_terminate=*/true );
365 #if TBB_USE_EXCEPTIONS
366  if( throwing && !ok )
367  throw_exception( eid_blocking_thread_join_impossible );
368 #else
369  suppress_unused_warning( throwing );
370 #endif
371  return ok;
372 }
373 #endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
374 
375 int task_scheduler_init::default_num_threads() {
376  return governor::default_num_threads();
377 }
378 
379 } // namespace tbb
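
The listing above implements the machinery behind the public tbb::task_scheduler_init class. The following standalone sketch (not part of governor.cpp) shows how client code reaches these entry points, assuming the public tbb/task_scheduler_init.h header from the same release: an explicit task_scheduler_init object goes through governor::init_scheduler() with auto_init == false, and terminate() ends up in governor::terminate_scheduler(). Threads that use TBB without such an object take the auto-initialization path instead and are cleaned up by governor::auto_terminate() when their TLS slot is destroyed.

#include "tbb/task_scheduler_init.h"
#include <cstdio>

int main() {
    // Explicit initialization: governor::init_scheduler() is called with
    // auto_init == false, so my_ref_count is incremented for this object.
    tbb::task_scheduler_init explicit_init( tbb::task_scheduler_init::automatic );
    std::printf( "default_num_threads = %d\n",
                 tbb::task_scheduler_init::default_num_threads() );
    explicit_init.terminate();   // governor::terminate_scheduler() drops the refcount

    // Deferred initialization: the constructor leaves the scheduler untouched
    // until initialize() is called; the destructor would terminate() if we forgot.
    tbb::task_scheduler_init deferred_init( tbb::task_scheduler_init::deferred );
    deferred_init.initialize( 4 );   // request a concurrency level of 4
    deferred_init.terminate();
    return 0;
}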
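
A detail worth calling out is the tagging scheme in governor::tls_value_of() (line 114) and tls_scheduler_of(): the TLS word holds the scheduler pointer with bit 0 set once the scheduler is fully initialized, i.e. it has an arena or is a worker. The sketch below reproduces that scheme on a plain struct purely for illustration; fake_scheduler, tag_for_tls, and scheduler_from_tls are hypothetical names, not TBB API.

#include <assert.h>
#include <stdint.h>

struct fake_scheduler {
    void* my_arena;   // non-NULL once an arena is attached (hypothetical stand-in)
    bool  is_worker;  // workers count as fully initialized
};

static uintptr_t tag_for_tls( fake_scheduler* s ) {
    // Scheduler objects are at least 2-byte aligned, so bit 0 is free for the flag.
    assert( (uintptr_t(s) & 1) == 0 );
    return uintptr_t(s) | uintptr_t( (s && (s->my_arena || s->is_worker)) ? 1 : 0 );
}

static fake_scheduler* scheduler_from_tls( uintptr_t v ) {
    return reinterpret_cast<fake_scheduler*>( v & ~uintptr_t(1) ); // strip the flag bit
}

int main() {
    fake_scheduler weak = { /*my_arena=*/0, /*is_worker=*/false };
    uintptr_t v = tag_for_tls( &weak );
    assert( (v & 1) == 0 );                     // weakly initialized: flag clear
    weak.my_arena = &weak;                      // pretend an arena was attached
    v = tag_for_tls( &weak );
    assert( (v & 1) == 1 );                     // fully initialized: flag set
    assert( scheduler_from_tls( v ) == &weak ); // pointer recovered intact
    return 0;
}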