libstdc++
atomic_base.h
1 // -*- C++ -*- header.
2 
3 // Copyright (C) 2008-2019 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/atomic_base.h
26  * This is an internal header file, included by other library headers.
27  * Do not attempt to use it directly. @headername{atomic}
28  */
29 
30 #ifndef _GLIBCXX_ATOMIC_BASE_H
31 #define _GLIBCXX_ATOMIC_BASE_H 1
32 
33 #pragma GCC system_header
34 
35 #include <bits/c++config.h>
36 #include <stdint.h>
37 #include <bits/atomic_lockfree_defines.h>
38 
39 #ifndef _GLIBCXX_ALWAYS_INLINE
40 #define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
41 #endif
42 
43 namespace std _GLIBCXX_VISIBILITY(default)
44 {
45 _GLIBCXX_BEGIN_NAMESPACE_VERSION
46 
47  /**
48  * @defgroup atomics Atomics
49  *
50  * Components for performing atomic operations.
51  * @{
52  */
53 
54  /// Enumeration for memory_order
55  typedef enum memory_order
56  {
57  memory_order_relaxed,
58  memory_order_consume,
59  memory_order_acquire,
60  memory_order_release,
61  memory_order_acq_rel,
62  memory_order_seq_cst
63  } memory_order;
64 
65  enum __memory_order_modifier
66  {
67  __memory_order_mask = 0x0ffff,
68  __memory_order_modifier_mask = 0xffff0000,
69  __memory_order_hle_acquire = 0x10000,
70  __memory_order_hle_release = 0x20000
71  };
72 
73  constexpr memory_order
74  operator|(memory_order __m, __memory_order_modifier __mod)
75  {
76  return memory_order(__m | int(__mod));
77  }
78 
79  constexpr memory_order
80  operator&(memory_order __m, __memory_order_modifier __mod)
81  {
82  return memory_order(__m & int(__mod));
83  }
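
These two operators let a standard memory_order be combined with one of the HLE hints above and split apart again with the masks. A minimal usage sketch, not part of this header (the __memory_order_hle_* values are a GCC extension for Intel TSX lock elision, and the names below are illustrative):

    #include <atomic>

    std::atomic<int> spin_word{0};   // 0 = unlocked, 1 = locked

    void lock_elided()
    {
      // The low 16 bits carry the real acquire ordering; the high bits carry
      // the elision hint, which __memory_order_mask strips off again before
      // the order reaches the compiler built-ins.
      while (spin_word.exchange(1, std::memory_order_acquire
                                     | std::__memory_order_hle_acquire))
        { /* busy wait */ }
    }

    void unlock_elided()
    {
      spin_word.store(0, std::memory_order_release
                           | std::__memory_order_hle_release);
    }
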
84 
85  // Drop release ordering as per [atomics.types.operations.req]/21
86  constexpr memory_order
87  __cmpexch_failure_order2(memory_order __m) noexcept
88  {
89  return __m == memory_order_acq_rel ? memory_order_acquire
90  : __m == memory_order_release ? memory_order_relaxed : __m;
91  }
92 
93  constexpr memory_order
94  __cmpexch_failure_order(memory_order __m) noexcept
95  {
96  return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
97  | (__m & __memory_order_modifier_mask));
98  }
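
As an illustration of the mapping, this helper only weakens the base order: acq_rel becomes acquire, release becomes relaxed, and everything else passes through. A sketch that checks this against the internal names defined above (assuming it is compiled against this libstdc++ header):

    #include <atomic>

    static_assert(std::__cmpexch_failure_order(std::memory_order_acq_rel)
                    == std::memory_order_acquire,
                  "acq_rel success order yields an acquire failure order");
    static_assert(std::__cmpexch_failure_order(std::memory_order_release)
                    == std::memory_order_relaxed,
                  "release success order yields a relaxed failure order");
    static_assert(std::__cmpexch_failure_order(std::memory_order_seq_cst)
                    == std::memory_order_seq_cst,
                  "other orders pass through unchanged");
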
99 
100  _GLIBCXX_ALWAYS_INLINE void
101  atomic_thread_fence(memory_order __m) noexcept
102  { __atomic_thread_fence(__m); }
103 
104  _GLIBCXX_ALWAYS_INLINE void
105  atomic_signal_fence(memory_order __m) noexcept
106  { __atomic_signal_fence(__m); }
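
These wrappers map directly onto the GCC __atomic fence built-ins. A usage sketch of the standard fence idiom, publishing plain data with a standalone release fence and a matching acquire fence (names are illustrative):

    #include <atomic>

    int payload;                        // plain, non-atomic data
    std::atomic<bool> ready{false};

    void producer()
    {
      payload = 42;
      std::atomic_thread_fence(std::memory_order_release);
      ready.store(true, std::memory_order_relaxed);
    }

    void consumer()
    {
      while (!ready.load(std::memory_order_relaxed))
        { /* spin */ }
      std::atomic_thread_fence(std::memory_order_acquire);
      // The fences synchronize, so reading payload here sees 42.
    }
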
107 
108  /// kill_dependency
109  template<typename _Tp>
110  inline _Tp
111  kill_dependency(_Tp __y) noexcept
112  {
113  _Tp __ret(__y);
114  return __ret;
115  }
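
kill_dependency simply copies its argument; its only purpose is to mark where a memory_order_consume dependency chain stops being carried. A small sketch of the intended use (illustrative names, assuming 'published' is set by another thread with a release store):

    #include <atomic>

    std::atomic<int*> published{nullptr};

    int reader()
    {
      int* p = published.load(std::memory_order_consume);
      // *p is dependency-ordered after the store that published it; the
      // value returned by kill_dependency no longer carries that dependency.
      return p ? std::kill_dependency(*p) : 0;
    }
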
116 
117 
118  // Base types for atomics.
119  template<typename _IntTp>
120  struct __atomic_base;
121 
122 
123 #define ATOMIC_VAR_INIT(_VI) { _VI }
124 
125  template<typename _Tp>
126  struct atomic;
127 
128  template<typename _Tp>
129  struct atomic<_Tp*>;
130 
131  /* The target's "set" value for test-and-set may not be exactly 1. */
132 #if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
133  typedef bool __atomic_flag_data_type;
134 #else
135  typedef unsigned char __atomic_flag_data_type;
136 #endif
137 
138  /**
139  * @brief Base type for atomic_flag.
140  *
141  * Base type is POD with data, allowing atomic_flag to derive from
142  * it and meet the standard layout type requirement. In addition to
143  * compatibility with a C interface, this allows different
144  * implementations of atomic_flag to use the same atomic operation
145  * functions, via a standard conversion to the __atomic_flag_base
146  * argument.
147  */
148  _GLIBCXX_BEGIN_EXTERN_C
149 
150  struct __atomic_flag_base
151  {
152  __atomic_flag_data_type _M_i;
153  };
154 
155  _GLIBCXX_END_EXTERN_C
156 
157 #define ATOMIC_FLAG_INIT { 0 }
158 
159  /// atomic_flag
160  struct atomic_flag : public __atomic_flag_base
161  {
162  atomic_flag() noexcept = default;
163  ~atomic_flag() noexcept = default;
164  atomic_flag(const atomic_flag&) = delete;
165  atomic_flag& operator=(const atomic_flag&) = delete;
166  atomic_flag& operator=(const atomic_flag&) volatile = delete;
167 
168  // Conversion to ATOMIC_FLAG_INIT.
169  constexpr atomic_flag(bool __i) noexcept
170  : __atomic_flag_base{ _S_init(__i) }
171  { }
172 
173  _GLIBCXX_ALWAYS_INLINE bool
174  test_and_set(memory_order __m = memory_order_seq_cst) noexcept
175  {
176  return __atomic_test_and_set (&_M_i, __m);
177  }
178 
179  _GLIBCXX_ALWAYS_INLINE bool
180  test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
181  {
182  return __atomic_test_and_set (&_M_i, __m);
183  }
184 
185  _GLIBCXX_ALWAYS_INLINE void
186  clear(memory_order __m = memory_order_seq_cst) noexcept
187  {
188  memory_order __b = __m & __memory_order_mask;
189  __glibcxx_assert(__b != memory_order_consume);
190  __glibcxx_assert(__b != memory_order_acquire);
191  __glibcxx_assert(__b != memory_order_acq_rel);
192 
193  __atomic_clear (&_M_i, __m);
194  }
195 
196  _GLIBCXX_ALWAYS_INLINE void
197  clear(memory_order __m = memory_order_seq_cst) volatile noexcept
198  {
199  memory_order __b = __m & __memory_order_mask;
200  __glibcxx_assert(__b != memory_order_consume);
201  __glibcxx_assert(__b != memory_order_acquire);
202  __glibcxx_assert(__b != memory_order_acq_rel);
203 
204  __atomic_clear (&_M_i, __m);
205  }
206 
207  private:
208  static constexpr __atomic_flag_data_type
209  _S_init(bool __i)
210  { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
211  };
212 
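
As a usage sketch (not part of this header), test_and_set and clear are all that is needed for the classic spinlock; acquire on the set and release on the clear are consistent with the assertions above, which reject acquire-flavoured orders in clear():

    #include <atomic>

    std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;

    void lock()
    {
      // test_and_set returns the previous value, so 'false' means this
      // thread just took the lock; otherwise keep spinning.
      while (lock_flag.test_and_set(std::memory_order_acquire))
        { /* busy wait */ }
    }

    void unlock()
    {
      lock_flag.clear(std::memory_order_release);
    }
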
213 
214  /// Base class for atomic integrals.
215  //
216  // For each of the integral types, define atomic_[integral type] struct
217  //
218  // atomic_bool bool
219  // atomic_char char
220  // atomic_schar signed char
221  // atomic_uchar unsigned char
222  // atomic_short short
223  // atomic_ushort unsigned short
224  // atomic_int int
225  // atomic_uint unsigned int
226  // atomic_long long
227  // atomic_ulong unsigned long
228  // atomic_llong long long
229  // atomic_ullong unsigned long long
230  // atomic_char8_t char8_t
231  // atomic_char16_t char16_t
232  // atomic_char32_t char32_t
233  // atomic_wchar_t wchar_t
234  //
235  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
236  // 8 bytes, since that is what GCC built-in functions for atomic
237  // memory access expect.
238  template<typename _ITp>
239  struct __atomic_base
240  {
241  using value_type = _ITp;
242  using difference_type = value_type;
243 
244  private:
245  typedef _ITp __int_type;
246 
247  static constexpr int _S_alignment =
248  sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);
249 
250  alignas(_S_alignment) __int_type _M_i;
251 
252  public:
253  __atomic_base() noexcept = default;
254  ~__atomic_base() noexcept = default;
255  __atomic_base(const __atomic_base&) = delete;
256  __atomic_base& operator=(const __atomic_base&) = delete;
257  __atomic_base& operator=(const __atomic_base&) volatile = delete;
258 
259  // Requires __int_type convertible to _M_i.
260  constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
261 
262  operator __int_type() const noexcept
263  { return load(); }
264 
265  operator __int_type() const volatile noexcept
266  { return load(); }
267 
268  __int_type
269  operator=(__int_type __i) noexcept
270  {
271  store(__i);
272  return __i;
273  }
274 
275  __int_type
276  operator=(__int_type __i) volatile noexcept
277  {
278  store(__i);
279  return __i;
280  }
281 
282  __int_type
283  operator++(int) noexcept
284  { return fetch_add(1); }
285 
286  __int_type
287  operator++(int) volatile noexcept
288  { return fetch_add(1); }
289 
290  __int_type
291  operator--(int) noexcept
292  { return fetch_sub(1); }
293 
294  __int_type
295  operator--(int) volatile noexcept
296  { return fetch_sub(1); }
297 
298  __int_type
299  operator++() noexcept
300  { return __atomic_add_fetch(&_M_i, 1, memory_order_seq_cst); }
301 
302  __int_type
303  operator++() volatile noexcept
304  { return __atomic_add_fetch(&_M_i, 1, memory_order_seq_cst); }
305 
306  __int_type
307  operator--() noexcept
308  { return __atomic_sub_fetch(&_M_i, 1, memory_order_seq_cst); }
309 
310  __int_type
311  operator--() volatile noexcept
312  { return __atomic_sub_fetch(&_M_i, 1, memory_order_seq_cst); }
313 
314  __int_type
315  operator+=(__int_type __i) noexcept
316  { return __atomic_add_fetch(&_M_i, __i, memory_order_seq_cst); }
317 
318  __int_type
319  operator+=(__int_type __i) volatile noexcept
320  { return __atomic_add_fetch(&_M_i, __i, memory_order_seq_cst); }
321 
322  __int_type
323  operator-=(__int_type __i) noexcept
324  { return __atomic_sub_fetch(&_M_i, __i, memory_order_seq_cst); }
325 
326  __int_type
327  operator-=(__int_type __i) volatile noexcept
328  { return __atomic_sub_fetch(&_M_i, __i, memory_order_seq_cst); }
329 
330  __int_type
331  operator&=(__int_type __i) noexcept
332  { return __atomic_and_fetch(&_M_i, __i, memory_order_seq_cst); }
333 
334  __int_type
335  operator&=(__int_type __i) volatile noexcept
336  { return __atomic_and_fetch(&_M_i, __i, memory_order_seq_cst); }
337 
338  __int_type
339  operator|=(__int_type __i) noexcept
340  { return __atomic_or_fetch(&_M_i, __i, memory_order_seq_cst); }
341 
342  __int_type
343  operator|=(__int_type __i) volatile noexcept
344  { return __atomic_or_fetch(&_M_i, __i, memory_order_seq_cst); }
345 
346  __int_type
347  operator^=(__int_type __i) noexcept
348  { return __atomic_xor_fetch(&_M_i, __i, memory_order_seq_cst); }
349 
350  __int_type
351  operator^=(__int_type __i) volatile noexcept
352  { return __atomic_xor_fetch(&_M_i, __i, memory_order_seq_cst); }
353 
354  bool
355  is_lock_free() const noexcept
356  {
357  // Use a fake, minimally aligned pointer.
358  return __atomic_is_lock_free(sizeof(_M_i),
359  reinterpret_cast<void *>(-_S_alignment));
360  }
361 
362  bool
363  is_lock_free() const volatile noexcept
364  {
365  // Use a fake, minimally aligned pointer.
366  return __atomic_is_lock_free(sizeof(_M_i),
367  reinterpret_cast<void *>(-_S_alignment));
368  }
369 
370  _GLIBCXX_ALWAYS_INLINE void
371  store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
372  {
373  memory_order __b = __m & __memory_order_mask;
374  __glibcxx_assert(__b != memory_order_acquire);
375  __glibcxx_assert(__b != memory_order_acq_rel);
376  __glibcxx_assert(__b != memory_order_consume);
377 
378  __atomic_store_n(&_M_i, __i, __m);
379  }
380 
381  _GLIBCXX_ALWAYS_INLINE void
382  store(__int_type __i,
383  memory_order __m = memory_order_seq_cst) volatile noexcept
384  {
385  memory_order __b = __m & __memory_order_mask;
386  __glibcxx_assert(__b != memory_order_acquire);
387  __glibcxx_assert(__b != memory_order_acq_rel);
388  __glibcxx_assert(__b != memory_order_consume);
389 
390  __atomic_store_n(&_M_i, __i, __m);
391  }
392 
393  _GLIBCXX_ALWAYS_INLINE __int_type
394  load(memory_order __m = memory_order_seq_cst) const noexcept
395  {
396  memory_order __b = __m & __memory_order_mask;
397  __glibcxx_assert(__b != memory_order_release);
398  __glibcxx_assert(__b != memory_order_acq_rel);
399 
400  return __atomic_load_n(&_M_i, __m);
401  }
402 
403  _GLIBCXX_ALWAYS_INLINE __int_type
404  load(memory_order __m = memory_order_seq_cst) const volatile noexcept
405  {
406  memory_order __b = __m & __memory_order_mask;
407  __glibcxx_assert(__b != memory_order_release);
408  __glibcxx_assert(__b != memory_order_acq_rel);
409 
410  return __atomic_load_n(&_M_i, __m);
411  }
412 
413  _GLIBCXX_ALWAYS_INLINE __int_type
414  exchange(__int_type __i,
415  memory_order __m = memory_order_seq_cst) noexcept
416  {
417  return __atomic_exchange_n(&_M_i, __i, __m);
418  }
419 
420 
421  _GLIBCXX_ALWAYS_INLINE __int_type
422  exchange(__int_type __i,
423  memory_order __m = memory_order_seq_cst) volatile noexcept
424  {
425  return __atomic_exchange_n(&_M_i, __i, __m);
426  }
427 
428  _GLIBCXX_ALWAYS_INLINE bool
429  compare_exchange_weak(__int_type& __i1, __int_type __i2,
430  memory_order __m1, memory_order __m2) noexcept
431  {
432  memory_order __b2 = __m2 & __memory_order_mask;
433  memory_order __b1 = __m1 & __memory_order_mask;
434  __glibcxx_assert(__b2 != memory_order_release);
435  __glibcxx_assert(__b2 != memory_order_acq_rel);
436  __glibcxx_assert(__b2 <= __b1);
437 
438  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
439  }
440 
441  _GLIBCXX_ALWAYS_INLINE bool
442  compare_exchange_weak(__int_type& __i1, __int_type __i2,
443  memory_order __m1,
444  memory_order __m2) volatile noexcept
445  {
446  memory_order __b2 = __m2 & __memory_order_mask;
447  memory_order __b1 = __m1 & __memory_order_mask;
448  __glibcxx_assert(__b2 != memory_order_release);
449  __glibcxx_assert(__b2 != memory_order_acq_rel);
450  __glibcxx_assert(__b2 <= __b1);
451 
452  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
453  }
454 
455  _GLIBCXX_ALWAYS_INLINE bool
456  compare_exchange_weak(__int_type& __i1, __int_type __i2,
457  memory_order __m = memory_order_seq_cst) noexcept
458  {
459  return compare_exchange_weak(__i1, __i2, __m,
460  __cmpexch_failure_order(__m));
461  }
462 
463  _GLIBCXX_ALWAYS_INLINE bool
464  compare_exchange_weak(__int_type& __i1, __int_type __i2,
465  memory_order __m = memory_order_seq_cst) volatile noexcept
466  {
467  return compare_exchange_weak(__i1, __i2, __m,
468  __cmpexch_failure_order(__m));
469  }
470 
471  _GLIBCXX_ALWAYS_INLINE bool
472  compare_exchange_strong(__int_type& __i1, __int_type __i2,
473  memory_order __m1, memory_order __m2) noexcept
474  {
475  memory_order __b2 = __m2 & __memory_order_mask;
476  memory_order __b1 = __m1 & __memory_order_mask;
477  __glibcxx_assert(__b2 != memory_order_release);
478  __glibcxx_assert(__b2 != memory_order_acq_rel);
479  __glibcxx_assert(__b2 <= __b1);
480 
481  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
482  }
483 
484  _GLIBCXX_ALWAYS_INLINE bool
485  compare_exchange_strong(__int_type& __i1, __int_type __i2,
486  memory_order __m1,
487  memory_order __m2) volatile noexcept
488  {
489  memory_order __b2 = __m2 & __memory_order_mask;
490  memory_order __b1 = __m1 & __memory_order_mask;
491 
492  __glibcxx_assert(__b2 != memory_order_release);
493  __glibcxx_assert(__b2 != memory_order_acq_rel);
494  __glibcxx_assert(__b2 <= __b1);
495 
496  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
497  }
498 
499  _GLIBCXX_ALWAYS_INLINE bool
500  compare_exchange_strong(__int_type& __i1, __int_type __i2,
501  memory_order __m = memory_order_seq_cst) noexcept
502  {
503  return compare_exchange_strong(__i1, __i2, __m,
504  __cmpexch_failure_order(__m));
505  }
506 
507  _GLIBCXX_ALWAYS_INLINE bool
508  compare_exchange_strong(__int_type& __i1, __int_type __i2,
509  memory_order __m = memory_order_seq_cst) volatile noexcept
510  {
511  return compare_exchange_strong(__i1, __i2, __m,
512  __cmpexch_failure_order(__m));
513  }
514 
515  _GLIBCXX_ALWAYS_INLINE __int_type
516  fetch_add(__int_type __i,
517  memory_order __m = memory_order_seq_cst) noexcept
518  { return __atomic_fetch_add(&_M_i, __i, __m); }
519 
520  _GLIBCXX_ALWAYS_INLINE __int_type
521  fetch_add(__int_type __i,
522  memory_order __m = memory_order_seq_cst) volatile noexcept
523  { return __atomic_fetch_add(&_M_i, __i, __m); }
524 
525  _GLIBCXX_ALWAYS_INLINE __int_type
526  fetch_sub(__int_type __i,
527  memory_order __m = memory_order_seq_cst) noexcept
528  { return __atomic_fetch_sub(&_M_i, __i, __m); }
529 
530  _GLIBCXX_ALWAYS_INLINE __int_type
531  fetch_sub(__int_type __i,
532  memory_order __m = memory_order_seq_cst) volatile noexcept
533  { return __atomic_fetch_sub(&_M_i, __i, __m); }
534 
535  _GLIBCXX_ALWAYS_INLINE __int_type
536  fetch_and(__int_type __i,
537  memory_order __m = memory_order_seq_cst) noexcept
538  { return __atomic_fetch_and(&_M_i, __i, __m); }
539 
540  _GLIBCXX_ALWAYS_INLINE __int_type
541  fetch_and(__int_type __i,
542  memory_order __m = memory_order_seq_cst) volatile noexcept
543  { return __atomic_fetch_and(&_M_i, __i, __m); }
544 
545  _GLIBCXX_ALWAYS_INLINE __int_type
546  fetch_or(__int_type __i,
547  memory_order __m = memory_order_seq_cst) noexcept
548  { return __atomic_fetch_or(&_M_i, __i, __m); }
549 
550  _GLIBCXX_ALWAYS_INLINE __int_type
551  fetch_or(__int_type __i,
552  memory_order __m = memory_order_seq_cst) volatile noexcept
553  { return __atomic_fetch_or(&_M_i, __i, __m); }
554 
555  _GLIBCXX_ALWAYS_INLINE __int_type
556  fetch_xor(__int_type __i,
557  memory_order __m = memory_order_seq_cst) noexcept
558  { return __atomic_fetch_xor(&_M_i, __i, __m); }
559 
560  _GLIBCXX_ALWAYS_INLINE __int_type
561  fetch_xor(__int_type __i,
562  memory_order __m = memory_order_seq_cst) volatile noexcept
563  { return __atomic_fetch_xor(&_M_i, __i, __m); }
564  };
565 
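
A usage sketch of the integral base as it is normally reached through std::atomic<int> (illustrative only): a compare_exchange_weak loop that atomically clamps a counter, passing an explicit relaxed failure order that satisfies the __b2 <= __b1 assertion above.

    #include <atomic>

    std::atomic<int> value{0};

    // Atomically lower 'value' to 'limit' if it currently exceeds it.
    void clamp(int limit)
    {
      int cur = value.load(std::memory_order_relaxed);
      while (cur > limit
             && !value.compare_exchange_weak(cur, limit,
                                             std::memory_order_release,
                                             std::memory_order_relaxed))
        {
          // On failure (spurious or real), 'cur' was reloaded with the
          // current value; the loop re-tests it against the limit.
        }
    }
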
566 
567  /// Partial specialization for pointer types.
568  template<typename _PTp>
569  struct __atomic_base<_PTp*>
570  {
571  private:
572  typedef _PTp* __pointer_type;
573 
574  __pointer_type _M_p;
575 
576  // Factored out to facilitate explicit specialization.
577  constexpr ptrdiff_t
578  _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }
579 
580  constexpr ptrdiff_t
581  _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }
582 
583  public:
584  __atomic_base() noexcept = default;
585  ~__atomic_base() noexcept = default;
586  __atomic_base(const __atomic_base&) = delete;
587  __atomic_base& operator=(const __atomic_base&) = delete;
588  __atomic_base& operator=(const __atomic_base&) volatile = delete;
589 
590  // Requires __pointer_type convertible to _M_p.
591  constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }
592 
593  operator __pointer_type() const noexcept
594  { return load(); }
595 
596  operator __pointer_type() const volatile noexcept
597  { return load(); }
598 
599  __pointer_type
600  operator=(__pointer_type __p) noexcept
601  {
602  store(__p);
603  return __p;
604  }
605 
606  __pointer_type
607  operator=(__pointer_type __p) volatile noexcept
608  {
609  store(__p);
610  return __p;
611  }
612 
613  __pointer_type
614  operator++(int) noexcept
615  { return fetch_add(1); }
616 
617  __pointer_type
618  operator++(int) volatile noexcept
619  { return fetch_add(1); }
620 
621  __pointer_type
622  operator--(int) noexcept
623  { return fetch_sub(1); }
624 
625  __pointer_type
626  operator--(int) volatile noexcept
627  { return fetch_sub(1); }
628 
629  __pointer_type
630  operator++() noexcept
631  { return __atomic_add_fetch(&_M_p, _M_type_size(1),
632  memory_order_seq_cst); }
633 
634  __pointer_type
635  operator++() volatile noexcept
636  { return __atomic_add_fetch(&_M_p, _M_type_size(1),
637  memory_order_seq_cst); }
638 
639  __pointer_type
640  operator--() noexcept
641  { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
642  memory_order_seq_cst); }
643 
644  __pointer_type
645  operator--() volatile noexcept
646  { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
647  memory_order_seq_cst); }
648 
649  __pointer_type
650  operator+=(ptrdiff_t __d) noexcept
651  { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
652  memory_order_seq_cst); }
653 
654  __pointer_type
655  operator+=(ptrdiff_t __d) volatile noexcept
656  { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
657  memory_order_seq_cst); }
658 
659  __pointer_type
660  operator-=(ptrdiff_t __d) noexcept
661  { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
662  memory_order_seq_cst); }
663 
664  __pointer_type
665  operator-=(ptrdiff_t __d) volatile noexcept
666  { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
667  memory_order_seq_cst); }
668 
669  bool
670  is_lock_free() const noexcept
671  {
672  // Produce a fake, minimally aligned pointer.
673  return __atomic_is_lock_free(sizeof(_M_p),
674  reinterpret_cast<void *>(-__alignof(_M_p)));
675  }
676 
677  bool
678  is_lock_free() const volatile noexcept
679  {
680  // Produce a fake, minimally aligned pointer.
681  return __atomic_is_lock_free(sizeof(_M_p),
682  reinterpret_cast<void *>(-__alignof(_M_p)));
683  }
684 
685  _GLIBCXX_ALWAYS_INLINE void
686  store(__pointer_type __p,
687  memory_order __m = memory_order_seq_cst) noexcept
688  {
689  memory_order __b = __m & __memory_order_mask;
690 
691  __glibcxx_assert(__b != memory_order_acquire);
692  __glibcxx_assert(__b != memory_order_acq_rel);
693  __glibcxx_assert(__b != memory_order_consume);
694 
695  __atomic_store_n(&_M_p, __p, __m);
696  }
697 
698  _GLIBCXX_ALWAYS_INLINE void
699  store(__pointer_type __p,
700  memory_order __m = memory_order_seq_cst) volatile noexcept
701  {
702  memory_order __b = __m & __memory_order_mask;
703  __glibcxx_assert(__b != memory_order_acquire);
704  __glibcxx_assert(__b != memory_order_acq_rel);
705  __glibcxx_assert(__b != memory_order_consume);
706 
707  __atomic_store_n(&_M_p, __p, __m);
708  }
709 
710  _GLIBCXX_ALWAYS_INLINE __pointer_type
711  load(memory_order __m = memory_order_seq_cst) const noexcept
712  {
713  memory_order __b = __m & __memory_order_mask;
714  __glibcxx_assert(__b != memory_order_release);
715  __glibcxx_assert(__b != memory_order_acq_rel);
716 
717  return __atomic_load_n(&_M_p, __m);
718  }
719 
720  _GLIBCXX_ALWAYS_INLINE __pointer_type
721  load(memory_order __m = memory_order_seq_cst) const volatile noexcept
722  {
723  memory_order __b = __m & __memory_order_mask;
724  __glibcxx_assert(__b != memory_order_release);
725  __glibcxx_assert(__b != memory_order_acq_rel);
726 
727  return __atomic_load_n(&_M_p, __m);
728  }
729 
730  _GLIBCXX_ALWAYS_INLINE __pointer_type
731  exchange(__pointer_type __p,
732  memory_order __m = memory_order_seq_cst) noexcept
733  {
734  return __atomic_exchange_n(&_M_p, __p, __m);
735  }
736 
737 
738  _GLIBCXX_ALWAYS_INLINE __pointer_type
739  exchange(__pointer_type __p,
740  memory_order __m = memory_order_seq_cst) volatile noexcept
741  {
742  return __atomic_exchange_n(&_M_p, __p, __m);
743  }
744 
745  _GLIBCXX_ALWAYS_INLINE bool
746  compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
747  memory_order __m1,
748  memory_order __m2) noexcept
749  {
750  memory_order __b2 = __m2 & __memory_order_mask;
751  memory_order __b1 = __m1 & __memory_order_mask;
752  __glibcxx_assert(__b2 != memory_order_release);
753  __glibcxx_assert(__b2 != memory_order_acq_rel);
754  __glibcxx_assert(__b2 <= __b1);
755 
756  return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
757  }
758 
759  _GLIBCXX_ALWAYS_INLINE bool
760  compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
761  memory_order __m1,
762  memory_order __m2) volatile noexcept
763  {
764  memory_order __b2 = __m2 & __memory_order_mask;
765  memory_order __b1 = __m1 & __memory_order_mask;
766 
767  __glibcxx_assert(__b2 != memory_order_release);
768  __glibcxx_assert(__b2 != memory_order_acq_rel);
769  __glibcxx_assert(__b2 <= __b1);
770 
771  return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
772  }
773 
774  _GLIBCXX_ALWAYS_INLINE __pointer_type
775  fetch_add(ptrdiff_t __d,
776  memory_order __m = memory_order_seq_cst) noexcept
777  { return __atomic_fetch_add(&_M_p, _M_type_size(__d), __m); }
778 
779  _GLIBCXX_ALWAYS_INLINE __pointer_type
780  fetch_add(ptrdiff_t __d,
781  memory_order __m = memory_order_seq_cst) volatile noexcept
782  { return __atomic_fetch_add(&_M_p, _M_type_size(__d), __m); }
783 
784  _GLIBCXX_ALWAYS_INLINE __pointer_type
785  fetch_sub(ptrdiff_t __d,
786  memory_order __m = memory_order_seq_cst) noexcept
787  { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), __m); }
788 
789  _GLIBCXX_ALWAYS_INLINE __pointer_type
790  fetch_sub(ptrdiff_t __d,
791  memory_order __m = memory_order_seq_cst) volatile noexcept
792  { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), __m); }
793  };
794 
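
The pointer specialization scales every offset by sizeof(_PTp) through _M_type_size, so atomic pointer arithmetic moves by whole elements just like built-in pointer arithmetic. A usage sketch with illustrative names:

    #include <atomic>

    int buffer[64];
    std::atomic<int*> cursor{buffer};

    int* take_slot()
    {
      // fetch_add(1) advances the stored pointer by one int (sizeof(int)
      // bytes) and returns the previous position, so concurrent callers
      // each get a distinct slot.
      return cursor.fetch_add(1, std::memory_order_relaxed);
    }
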
795  // @} group atomics
796 
797 _GLIBCXX_END_NAMESPACE_VERSION
798 } // namespace std
799 
800 #endif