libstdc++
atomic_base.h
1 // -*- C++ -*- header.
2 
3 // Copyright (C) 2008-2021 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/atomic_base.h
26  * This is an internal header file, included by other library headers.
27  * Do not attempt to use it directly. @headername{atomic}
28  */
29 
30 #ifndef _GLIBCXX_ATOMIC_BASE_H
31 #define _GLIBCXX_ATOMIC_BASE_H 1
32 
33 #pragma GCC system_header
34 
35 #include <bits/c++config.h>
36 #include <stdint.h>
37 #include <bits/atomic_lockfree_defines.h>
38 #include <bits/move.h>
39 
40 #if __cplusplus > 201703L
41 #include <bits/atomic_wait.h>
42 #endif
43 
44 #ifndef _GLIBCXX_ALWAYS_INLINE
45 #define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
46 #endif
47 
48 namespace std _GLIBCXX_VISIBILITY(default)
49 {
50 _GLIBCXX_BEGIN_NAMESPACE_VERSION
51 
52  /**
53  * @defgroup atomics Atomics
54  *
55  * Components for performing atomic operations.
56  * @{
57  */
58 
59  /// Enumeration for memory_order
60 #if __cplusplus > 201703L
61  enum class memory_order : int
62  {
63  relaxed,
64  consume,
65  acquire,
66  release,
67  acq_rel,
68  seq_cst
69  };
70 
71  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
72  inline constexpr memory_order memory_order_consume = memory_order::consume;
73  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
74  inline constexpr memory_order memory_order_release = memory_order::release;
75  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
76  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
77 #else
78  typedef enum memory_order
79  {
80  memory_order_relaxed,
81  memory_order_consume,
82  memory_order_acquire,
83  memory_order_release,
84  memory_order_acq_rel,
85  memory_order_seq_cst
86  } memory_order;
87 #endif
88 
89  enum __memory_order_modifier
90  {
91  __memory_order_mask = 0x0ffff,
92  __memory_order_modifier_mask = 0xffff0000,
93  __memory_order_hle_acquire = 0x10000,
94  __memory_order_hle_release = 0x20000
95  };
96 
97  constexpr memory_order
98  operator|(memory_order __m, __memory_order_modifier __mod)
99  {
100  return memory_order(int(__m) | int(__mod));
101  }
102 
103  constexpr memory_order
104  operator&(memory_order __m, __memory_order_modifier __mod)
105  {
106  return memory_order(int(__m) & int(__mod));
107  }
108 
109  // Drop release ordering as per [atomics.types.operations.req]/21
110  constexpr memory_order
111  __cmpexch_failure_order2(memory_order __m) noexcept
112  {
113  return __m == memory_order_acq_rel ? memory_order_acquire
114  : __m == memory_order_release ? memory_order_relaxed : __m;
115  }
116 
117  constexpr memory_order
118  __cmpexch_failure_order(memory_order __m) noexcept
119  {
120  return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
121  | __memory_order_modifier(__m & __memory_order_modifier_mask));
122  }
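  // Editorial note (not part of the original header): a sketch of the
  // failure orders derived when only one memory_order is supplied to
  // compare_exchange_{weak,strong}.  Release ordering is dropped, so:
  //
  //   __cmpexch_failure_order(memory_order_seq_cst) == memory_order_seq_cst
  //   __cmpexch_failure_order(memory_order_acq_rel) == memory_order_acquire
  //   __cmpexch_failure_order(memory_order_release) == memory_order_relaxed
  //
  // Any HLE modifier bits in the argument are preserved unchanged.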
123 
124  _GLIBCXX_ALWAYS_INLINE void
125  atomic_thread_fence(memory_order __m) noexcept
126  { __atomic_thread_fence(int(__m)); }
127 
128  _GLIBCXX_ALWAYS_INLINE void
129  atomic_signal_fence(memory_order __m) noexcept
130  { __atomic_signal_fence(int(__m)); }
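  // Editorial usage sketch (not part of the original header): pairing
  // atomic_thread_fence with relaxed operations to publish data between
  // threads.  The names `data', `ready', `producer' and `consumer' are
  // illustrative only.
  //
  //   #include <atomic>
  //
  //   int data = 0;                     // plain, non-atomic payload
  //   std::atomic<bool> ready{false};
  //
  //   void producer()                   // thread 1
  //   {
  //     data = 42;
  //     std::atomic_thread_fence(std::memory_order_release);
  //     ready.store(true, std::memory_order_relaxed);
  //   }
  //
  //   void consumer()                   // thread 2
  //   {
  //     while (!ready.load(std::memory_order_relaxed))
  //       ;                             // spin until published
  //     std::atomic_thread_fence(std::memory_order_acquire);
  //     // data == 42 is now guaranteed to be visible here
  //   }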
131 
132  /// kill_dependency
133  template<typename _Tp>
134  inline _Tp
135  kill_dependency(_Tp __y) noexcept
136  {
137  _Tp __ret(__y);
138  return __ret;
139  }
140 
141  // Base types for atomics.
142  template<typename _IntTp>
143  struct __atomic_base;
144 
145 #if __cplusplus <= 201703L
146 # define _GLIBCXX20_INIT(I)
147 #else
148 # define __cpp_lib_atomic_value_initialization 201911L
149 # define _GLIBCXX20_INIT(I) = I
150 #endif
151 
152 #define ATOMIC_VAR_INIT(_VI) { _VI }
153 
154  template<typename _Tp>
155  struct atomic;
156 
157  template<typename _Tp>
158  struct atomic<_Tp*>;
159 
160  /* The target's "set" value for test-and-set may not be exactly 1. */
161 #if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
162  typedef bool __atomic_flag_data_type;
163 #else
164  typedef unsigned char __atomic_flag_data_type;
165 #endif
166 
167  /**
168  * @brief Base type for atomic_flag.
169  *
170  * Base type is POD with data, allowing atomic_flag to derive from
171  * it and meet the standard layout type requirement. In addition to
172  * compatibility with a C interface, this allows different
173  * implementations of atomic_flag to use the same atomic operation
174  * functions, via a standard conversion to the __atomic_flag_base
175  * argument.
176  */
177  _GLIBCXX_BEGIN_EXTERN_C
178 
179  struct __atomic_flag_base
180  {
181  __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
182  };
183 
184  _GLIBCXX_END_EXTERN_C
185 
186 #define ATOMIC_FLAG_INIT { 0 }
187 
188  /// atomic_flag
189  struct atomic_flag : public __atomic_flag_base
190  {
191  atomic_flag() noexcept = default;
192  ~atomic_flag() noexcept = default;
193  atomic_flag(const atomic_flag&) = delete;
194  atomic_flag& operator=(const atomic_flag&) = delete;
195  atomic_flag& operator=(const atomic_flag&) volatile = delete;
196 
197  // Conversion to ATOMIC_FLAG_INIT.
198  constexpr atomic_flag(bool __i) noexcept
199  : __atomic_flag_base{ _S_init(__i) }
200  { }
201 
202  _GLIBCXX_ALWAYS_INLINE bool
203  test_and_set(memory_order __m = memory_order_seq_cst) noexcept
204  {
205  return __atomic_test_and_set (&_M_i, int(__m));
206  }
207 
208  _GLIBCXX_ALWAYS_INLINE bool
209  test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
210  {
211  return __atomic_test_and_set (&_M_i, int(__m));
212  }
213 
214 #if __cplusplus > 201703L
215 #define __cpp_lib_atomic_flag_test 201907L
216 
217  _GLIBCXX_ALWAYS_INLINE bool
218  test(memory_order __m = memory_order_seq_cst) const noexcept
219  {
220  __atomic_flag_data_type __v;
221  __atomic_load(&_M_i, &__v, int(__m));
222  return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
223  }
224 
225  _GLIBCXX_ALWAYS_INLINE bool
226  test(memory_order __m = memory_order_seq_cst) const volatile noexcept
227  {
228  __atomic_flag_data_type __v;
229  __atomic_load(&_M_i, &__v, int(__m));
230  return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
231  }
232 
233 #if __cpp_lib_atomic_wait
234  _GLIBCXX_ALWAYS_INLINE void
235  wait(bool __old,
236  memory_order __m = memory_order_seq_cst) const noexcept
237  {
238  std::__atomic_wait(&_M_i, static_cast<__atomic_flag_data_type>(__old),
239  [__m, this, __old]()
240  { return this->test(__m) != __old; });
241  }
242 
243  // TODO add const volatile overload
244 
245  _GLIBCXX_ALWAYS_INLINE void
246  notify_one() const noexcept
247  { std::__atomic_notify(&_M_i, false); }
248 
249  // TODO add const volatile overload
250 
251  _GLIBCXX_ALWAYS_INLINE void
252  notify_all() const noexcept
253  { std::__atomic_notify(&_M_i, true); }
254 
255  // TODO add const volatile overload
256 #endif // __cpp_lib_atomic_wait
257 #endif // C++20
258 
259  _GLIBCXX_ALWAYS_INLINE void
260  clear(memory_order __m = memory_order_seq_cst) noexcept
261  {
262  memory_order __b __attribute__ ((__unused__))
263  = __m & __memory_order_mask;
264  __glibcxx_assert(__b != memory_order_consume);
265  __glibcxx_assert(__b != memory_order_acquire);
266  __glibcxx_assert(__b != memory_order_acq_rel);
267 
268  __atomic_clear (&_M_i, int(__m));
269  }
270 
271  _GLIBCXX_ALWAYS_INLINE void
272  clear(memory_order __m = memory_order_seq_cst) volatile noexcept
273  {
274  memory_order __b __attribute__ ((__unused__))
275  = __m & __memory_order_mask;
276  __glibcxx_assert(__b != memory_order_consume);
277  __glibcxx_assert(__b != memory_order_acquire);
278  __glibcxx_assert(__b != memory_order_acq_rel);
279 
280  __atomic_clear (&_M_i, int(__m));
281  }
282 
283  private:
284  static constexpr __atomic_flag_data_type
285  _S_init(bool __i)
286  { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
287  };
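  // Editorial usage sketch (not part of the original header): atomic_flag,
  // which derives from __atomic_flag_base above, used as a minimal spinlock.
  // The names `lock_flag' and `critical_section' are illustrative only.
  //
  //   #include <atomic>
  //
  //   std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;
  //
  //   void critical_section()
  //   {
  //     while (lock_flag.test_and_set(std::memory_order_acquire))
  //       ;  // spin while another thread holds the flag
  //     // ... exclusive work ...
  //     lock_flag.clear(std::memory_order_release);
  //   }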
288 
289 
290  /// Base class for atomic integrals.
291  //
292  // For each of the integral types, define atomic_[integral type] struct
293  //
294  // atomic_bool bool
295  // atomic_char char
296  // atomic_schar signed char
297  // atomic_uchar unsigned char
298  // atomic_short short
299  // atomic_ushort unsigned short
300  // atomic_int int
301  // atomic_uint unsigned int
302  // atomic_long long
303  // atomic_ulong unsigned long
304  // atomic_llong long long
305  // atomic_ullong unsigned long long
306  // atomic_char8_t char8_t
307  // atomic_char16_t char16_t
308  // atomic_char32_t char32_t
309  // atomic_wchar_t wchar_t
310  //
311  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
312  // 8 bytes, since that is what GCC built-in functions for atomic
313  // memory access expect.
314  template<typename _ITp>
315  struct __atomic_base
316  {
317  using value_type = _ITp;
318  using difference_type = value_type;
319 
320  private:
321  typedef _ITp __int_type;
322 
323  static constexpr int _S_alignment =
324  sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);
325 
326  alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);
327 
328  public:
329  __atomic_base() noexcept = default;
330  ~__atomic_base() noexcept = default;
331  __atomic_base(const __atomic_base&) = delete;
332  __atomic_base& operator=(const __atomic_base&) = delete;
333  __atomic_base& operator=(const __atomic_base&) volatile = delete;
334 
335  // Requires __int_type convertible to _M_i.
336  constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
337 
338  operator __int_type() const noexcept
339  { return load(); }
340 
341  operator __int_type() const volatile noexcept
342  { return load(); }
343 
344  __int_type
345  operator=(__int_type __i) noexcept
346  {
347  store(__i);
348  return __i;
349  }
350 
351  __int_type
352  operator=(__int_type __i) volatile noexcept
353  {
354  store(__i);
355  return __i;
356  }
357 
358  __int_type
359  operator++(int) noexcept
360  { return fetch_add(1); }
361 
362  __int_type
363  operator++(int) volatile noexcept
364  { return fetch_add(1); }
365 
366  __int_type
367  operator--(int) noexcept
368  { return fetch_sub(1); }
369 
370  __int_type
371  operator--(int) volatile noexcept
372  { return fetch_sub(1); }
373 
374  __int_type
375  operator++() noexcept
376  { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
377 
378  __int_type
379  operator++() volatile noexcept
380  { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
381 
382  __int_type
383  operator--() noexcept
384  { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
385 
386  __int_type
387  operator--() volatile noexcept
388  { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
389 
390  __int_type
391  operator+=(__int_type __i) noexcept
392  { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
393 
394  __int_type
395  operator+=(__int_type __i) volatile noexcept
396  { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
397 
398  __int_type
399  operator-=(__int_type __i) noexcept
400  { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
401 
402  __int_type
403  operator-=(__int_type __i) volatile noexcept
404  { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
405 
406  __int_type
407  operator&=(__int_type __i) noexcept
408  { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
409 
410  __int_type
411  operator&=(__int_type __i) volatile noexcept
412  { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
413 
414  __int_type
415  operator|=(__int_type __i) noexcept
416  { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
417 
418  __int_type
419  operator|=(__int_type __i) volatile noexcept
420  { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
421 
422  __int_type
423  operator^=(__int_type __i) noexcept
424  { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
425 
426  __int_type
427  operator^=(__int_type __i) volatile noexcept
428  { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
429 
430  bool
431  is_lock_free() const noexcept
432  {
433  // Use a fake, minimally aligned pointer.
434  return __atomic_is_lock_free(sizeof(_M_i),
435  reinterpret_cast<void *>(-_S_alignment));
436  }
437 
438  bool
439  is_lock_free() const volatile noexcept
440  {
441  // Use a fake, minimally aligned pointer.
442  return __atomic_is_lock_free(sizeof(_M_i),
443  reinterpret_cast<void *>(-_S_alignment));
444  }
445 
446  _GLIBCXX_ALWAYS_INLINE void
447  store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
448  {
449  memory_order __b __attribute__ ((__unused__))
450  = __m & __memory_order_mask;
451  __glibcxx_assert(__b != memory_order_acquire);
452  __glibcxx_assert(__b != memory_order_acq_rel);
453  __glibcxx_assert(__b != memory_order_consume);
454 
455  __atomic_store_n(&_M_i, __i, int(__m));
456  }
457 
458  _GLIBCXX_ALWAYS_INLINE void
459  store(__int_type __i,
460  memory_order __m = memory_order_seq_cst) volatile noexcept
461  {
462  memory_order __b __attribute__ ((__unused__))
463  = __m & __memory_order_mask;
464  __glibcxx_assert(__b != memory_order_acquire);
465  __glibcxx_assert(__b != memory_order_acq_rel);
466  __glibcxx_assert(__b != memory_order_consume);
467 
468  __atomic_store_n(&_M_i, __i, int(__m));
469  }
470 
471  _GLIBCXX_ALWAYS_INLINE __int_type
472  load(memory_order __m = memory_order_seq_cst) const noexcept
473  {
474  memory_order __b __attribute__ ((__unused__))
475  = __m & __memory_order_mask;
476  __glibcxx_assert(__b != memory_order_release);
477  __glibcxx_assert(__b != memory_order_acq_rel);
478 
479  return __atomic_load_n(&_M_i, int(__m));
480  }
481 
482  _GLIBCXX_ALWAYS_INLINE __int_type
483  load(memory_order __m = memory_order_seq_cst) const volatile noexcept
484  {
485  memory_order __b __attribute__ ((__unused__))
486  = __m & __memory_order_mask;
487  __glibcxx_assert(__b != memory_order_release);
488  __glibcxx_assert(__b != memory_order_acq_rel);
489 
490  return __atomic_load_n(&_M_i, int(__m));
491  }
492 
493  _GLIBCXX_ALWAYS_INLINE __int_type
494  exchange(__int_type __i,
495  memory_order __m = memory_order_seq_cst) noexcept
496  {
497  return __atomic_exchange_n(&_M_i, __i, int(__m));
498  }
499 
500 
501  _GLIBCXX_ALWAYS_INLINE __int_type
502  exchange(__int_type __i,
503  memory_order __m = memory_order_seq_cst) volatile noexcept
504  {
505  return __atomic_exchange_n(&_M_i, __i, int(__m));
506  }
507 
508  _GLIBCXX_ALWAYS_INLINE bool
509  compare_exchange_weak(__int_type& __i1, __int_type __i2,
510  memory_order __m1, memory_order __m2) noexcept
511  {
512  memory_order __b2 __attribute__ ((__unused__))
513  = __m2 & __memory_order_mask;
514  memory_order __b1 __attribute__ ((__unused__))
515  = __m1 & __memory_order_mask;
516  __glibcxx_assert(__b2 != memory_order_release);
517  __glibcxx_assert(__b2 != memory_order_acq_rel);
518  __glibcxx_assert(__b2 <= __b1);
519 
520  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
521  int(__m1), int(__m2));
522  }
523 
524  _GLIBCXX_ALWAYS_INLINE bool
525  compare_exchange_weak(__int_type& __i1, __int_type __i2,
526  memory_order __m1,
527  memory_order __m2) volatile noexcept
528  {
529  memory_order __b2 __attribute__ ((__unused__))
530  = __m2 & __memory_order_mask;
531  memory_order __b1 __attribute__ ((__unused__))
532  = __m1 & __memory_order_mask;
533  __glibcxx_assert(__b2 != memory_order_release);
534  __glibcxx_assert(__b2 != memory_order_acq_rel);
535  __glibcxx_assert(__b2 <= __b1);
536 
537  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
538  int(__m1), int(__m2));
539  }
540 
541  _GLIBCXX_ALWAYS_INLINE bool
542  compare_exchange_weak(__int_type& __i1, __int_type __i2,
543  memory_order __m = memory_order_seq_cst) noexcept
544  {
545  return compare_exchange_weak(__i1, __i2, __m,
546  __cmpexch_failure_order(__m));
547  }
548 
549  _GLIBCXX_ALWAYS_INLINE bool
550  compare_exchange_weak(__int_type& __i1, __int_type __i2,
551  memory_order __m = memory_order_seq_cst) volatile noexcept
552  {
553  return compare_exchange_weak(__i1, __i2, __m,
554  __cmpexch_failure_order(__m));
555  }
556 
557  _GLIBCXX_ALWAYS_INLINE bool
558  compare_exchange_strong(__int_type& __i1, __int_type __i2,
559  memory_order __m1, memory_order __m2) noexcept
560  {
561  memory_order __b2 __attribute__ ((__unused__))
562  = __m2 & __memory_order_mask;
563  memory_order __b1 __attribute__ ((__unused__))
564  = __m1 & __memory_order_mask;
565  __glibcxx_assert(__b2 != memory_order_release);
566  __glibcxx_assert(__b2 != memory_order_acq_rel);
567  __glibcxx_assert(__b2 <= __b1);
568 
569  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
570  int(__m1), int(__m2));
571  }
572 
573  _GLIBCXX_ALWAYS_INLINE bool
574  compare_exchange_strong(__int_type& __i1, __int_type __i2,
575  memory_order __m1,
576  memory_order __m2) volatile noexcept
577  {
578  memory_order __b2 __attribute__ ((__unused__))
579  = __m2 & __memory_order_mask;
580  memory_order __b1 __attribute__ ((__unused__))
581  = __m1 & __memory_order_mask;
582 
583  __glibcxx_assert(__b2 != memory_order_release);
584  __glibcxx_assert(__b2 != memory_order_acq_rel);
585  __glibcxx_assert(__b2 <= __b1);
586 
587  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
588  int(__m1), int(__m2));
589  }
590 
591  _GLIBCXX_ALWAYS_INLINE bool
592  compare_exchange_strong(__int_type& __i1, __int_type __i2,
593  memory_order __m = memory_order_seq_cst) noexcept
594  {
595  return compare_exchange_strong(__i1, __i2, __m,
596  __cmpexch_failure_order(__m));
597  }
598 
599  _GLIBCXX_ALWAYS_INLINE bool
600  compare_exchange_strong(__int_type& __i1, __int_type __i2,
601  memory_order __m = memory_order_seq_cst) volatile noexcept
602  {
603  return compare_exchange_strong(__i1, __i2, __m,
604  __cmpexch_failure_order(__m));
605  }
606 
607 #if __cpp_lib_atomic_wait
608  _GLIBCXX_ALWAYS_INLINE void
609  wait(__int_type __old,
610  memory_order __m = memory_order_seq_cst) const noexcept
611  {
612  std::__atomic_wait(&_M_i, __old,
613  [__m, this, __old]
614  { return this->load(__m) != __old; });
615  }
616 
617  // TODO add const volatile overload
618 
619  _GLIBCXX_ALWAYS_INLINE void
620  notify_one() const noexcept
621  { std::__atomic_notify(&_M_i, false); }
622 
623  // TODO add const volatile overload
624 
625  _GLIBCXX_ALWAYS_INLINE void
626  notify_all() const noexcept
627  { std::__atomic_notify(&_M_i, true); }
628 
629  // TODO add const volatile overload
630 #endif // __cpp_lib_atomic_wait
631 
632  _GLIBCXX_ALWAYS_INLINE __int_type
633  fetch_add(__int_type __i,
634  memory_order __m = memory_order_seq_cst) noexcept
635  { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
636 
637  _GLIBCXX_ALWAYS_INLINE __int_type
638  fetch_add(__int_type __i,
639  memory_order __m = memory_order_seq_cst) volatile noexcept
640  { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
641 
642  _GLIBCXX_ALWAYS_INLINE __int_type
643  fetch_sub(__int_type __i,
644  memory_order __m = memory_order_seq_cst) noexcept
645  { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
646 
647  _GLIBCXX_ALWAYS_INLINE __int_type
648  fetch_sub(__int_type __i,
649  memory_order __m = memory_order_seq_cst) volatile noexcept
650  { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
651 
652  _GLIBCXX_ALWAYS_INLINE __int_type
653  fetch_and(__int_type __i,
654  memory_order __m = memory_order_seq_cst) noexcept
655  { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
656 
657  _GLIBCXX_ALWAYS_INLINE __int_type
658  fetch_and(__int_type __i,
659  memory_order __m = memory_order_seq_cst) volatile noexcept
660  { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
661 
662  _GLIBCXX_ALWAYS_INLINE __int_type
663  fetch_or(__int_type __i,
664  memory_order __m = memory_order_seq_cst) noexcept
665  { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
666 
667  _GLIBCXX_ALWAYS_INLINE __int_type
668  fetch_or(__int_type __i,
669  memory_order __m = memory_order_seq_cst) volatile noexcept
670  { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
671 
672  _GLIBCXX_ALWAYS_INLINE __int_type
673  fetch_xor(__int_type __i,
674  memory_order __m = memory_order_seq_cst) noexcept
675  { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
676 
677  _GLIBCXX_ALWAYS_INLINE __int_type
678  fetch_xor(__int_type __i,
679  memory_order __m = memory_order_seq_cst) volatile noexcept
680  { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
681  };
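  // Editorial usage sketch (not part of the original header): the integral
  // atomics built on __atomic_base above, e.g. std::atomic<int>, expose
  // fetch_add and compare_exchange_weak.  `counter', `bump' and `try_double'
  // are illustrative names.
  //
  //   #include <atomic>
  //
  //   std::atomic<int> counter{0};
  //
  //   void bump()
  //   { counter.fetch_add(1, std::memory_order_relaxed); }
  //
  //   void try_double()
  //   {
  //     int expected = counter.load(std::memory_order_relaxed);
  //     // weak CAS may fail spuriously, hence the loop; on failure
  //     // `expected' is refreshed with the current value
  //     while (!counter.compare_exchange_weak(expected, expected * 2))
  //       ;
  //   }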
682 
683 
684  /// Partial specialization for pointer types.
685  template<typename _PTp>
686  struct __atomic_base<_PTp*>
687  {
688  private:
689  typedef _PTp* __pointer_type;
690 
691  __pointer_type _M_p _GLIBCXX20_INIT(nullptr);
692 
693  // Factored out to facilitate explicit specialization.
694  constexpr ptrdiff_t
695  _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }
696 
697  constexpr ptrdiff_t
698  _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }
699 
700  public:
701  __atomic_base() noexcept = default;
702  ~__atomic_base() noexcept = default;
703  __atomic_base(const __atomic_base&) = delete;
704  __atomic_base& operator=(const __atomic_base&) = delete;
705  __atomic_base& operator=(const __atomic_base&) volatile = delete;
706 
707  // Requires __pointer_type convertible to _M_p.
708  constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }
709 
710  operator __pointer_type() const noexcept
711  { return load(); }
712 
713  operator __pointer_type() const volatile noexcept
714  { return load(); }
715 
716  __pointer_type
717  operator=(__pointer_type __p) noexcept
718  {
719  store(__p);
720  return __p;
721  }
722 
723  __pointer_type
724  operator=(__pointer_type __p) volatile noexcept
725  {
726  store(__p);
727  return __p;
728  }
729 
730  __pointer_type
731  operator++(int) noexcept
732  { return fetch_add(1); }
733 
734  __pointer_type
735  operator++(int) volatile noexcept
736  { return fetch_add(1); }
737 
738  __pointer_type
739  operator--(int) noexcept
740  { return fetch_sub(1); }
741 
742  __pointer_type
743  operator--(int) volatile noexcept
744  { return fetch_sub(1); }
745 
746  __pointer_type
747  operator++() noexcept
748  { return __atomic_add_fetch(&_M_p, _M_type_size(1),
749  int(memory_order_seq_cst)); }
750 
751  __pointer_type
752  operator++() volatile noexcept
753  { return __atomic_add_fetch(&_M_p, _M_type_size(1),
754  int(memory_order_seq_cst)); }
755 
756  __pointer_type
757  operator--() noexcept
758  { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
759  int(memory_order_seq_cst)); }
760 
761  __pointer_type
762  operator--() volatile noexcept
763  { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
764  int(memory_order_seq_cst)); }
765 
766  __pointer_type
767  operator+=(ptrdiff_t __d) noexcept
768  { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
769  int(memory_order_seq_cst)); }
770 
771  __pointer_type
772  operator+=(ptrdiff_t __d) volatile noexcept
773  { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
774  int(memory_order_seq_cst)); }
775 
776  __pointer_type
777  operator-=(ptrdiff_t __d) noexcept
778  { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
779  int(memory_order_seq_cst)); }
780 
781  __pointer_type
782  operator-=(ptrdiff_t __d) volatile noexcept
783  { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
784  int(memory_order_seq_cst)); }
785 
786  bool
787  is_lock_free() const noexcept
788  {
789  // Produce a fake, minimally aligned pointer.
790  return __atomic_is_lock_free(sizeof(_M_p),
791  reinterpret_cast<void *>(-__alignof(_M_p)));
792  }
793 
794  bool
795  is_lock_free() const volatile noexcept
796  {
797  // Produce a fake, minimally aligned pointer.
798  return __atomic_is_lock_free(sizeof(_M_p),
799  reinterpret_cast<void *>(-__alignof(_M_p)));
800  }
801 
802  _GLIBCXX_ALWAYS_INLINE void
803  store(__pointer_type __p,
804  memory_order __m = memory_order_seq_cst) noexcept
805  {
806  memory_order __b __attribute__ ((__unused__))
807  = __m & __memory_order_mask;
808 
809  __glibcxx_assert(__b != memory_order_acquire);
810  __glibcxx_assert(__b != memory_order_acq_rel);
811  __glibcxx_assert(__b != memory_order_consume);
812 
813  __atomic_store_n(&_M_p, __p, int(__m));
814  }
815 
816  _GLIBCXX_ALWAYS_INLINE void
817  store(__pointer_type __p,
818  memory_order __m = memory_order_seq_cst) volatile noexcept
819  {
820  memory_order __b __attribute__ ((__unused__))
821  = __m & __memory_order_mask;
822  __glibcxx_assert(__b != memory_order_acquire);
823  __glibcxx_assert(__b != memory_order_acq_rel);
824  __glibcxx_assert(__b != memory_order_consume);
825 
826  __atomic_store_n(&_M_p, __p, int(__m));
827  }
828 
829  _GLIBCXX_ALWAYS_INLINE __pointer_type
830  load(memory_order __m = memory_order_seq_cst) const noexcept
831  {
832  memory_order __b __attribute__ ((__unused__))
833  = __m & __memory_order_mask;
834  __glibcxx_assert(__b != memory_order_release);
835  __glibcxx_assert(__b != memory_order_acq_rel);
836 
837  return __atomic_load_n(&_M_p, int(__m));
838  }
839 
840  _GLIBCXX_ALWAYS_INLINE __pointer_type
841  load(memory_order __m = memory_order_seq_cst) const volatile noexcept
842  {
843  memory_order __b __attribute__ ((__unused__))
844  = __m & __memory_order_mask;
845  __glibcxx_assert(__b != memory_order_release);
846  __glibcxx_assert(__b != memory_order_acq_rel);
847 
848  return __atomic_load_n(&_M_p, int(__m));
849  }
850 
851  _GLIBCXX_ALWAYS_INLINE __pointer_type
852  exchange(__pointer_type __p,
853  memory_order __m = memory_order_seq_cst) noexcept
854  {
855  return __atomic_exchange_n(&_M_p, __p, int(__m));
856  }
857 
858 
859  _GLIBCXX_ALWAYS_INLINE __pointer_type
860  exchange(__pointer_type __p,
861  memory_order __m = memory_order_seq_cst) volatile noexcept
862  {
863  return __atomic_exchange_n(&_M_p, __p, int(__m));
864  }
865 
866  _GLIBCXX_ALWAYS_INLINE bool
867  compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
868  memory_order __m1,
869  memory_order __m2) noexcept
870  {
871  memory_order __b2 __attribute__ ((__unused__))
872  = __m2 & __memory_order_mask;
873  memory_order __b1 __attribute__ ((__unused__))
874  = __m1 & __memory_order_mask;
875  __glibcxx_assert(__b2 != memory_order_release);
876  __glibcxx_assert(__b2 != memory_order_acq_rel);
877  __glibcxx_assert(__b2 <= __b1);
878 
879  return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
880  int(__m1), int(__m2));
881  }
882 
883  _GLIBCXX_ALWAYS_INLINE bool
884  compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
885  memory_order __m1,
886  memory_order __m2) volatile noexcept
887  {
888  memory_order __b2 __attribute__ ((__unused__))
889  = __m2 & __memory_order_mask;
890  memory_order __b1 __attribute__ ((__unused__))
891  = __m1 & __memory_order_mask;
892 
893  __glibcxx_assert(__b2 != memory_order_release);
894  __glibcxx_assert(__b2 != memory_order_acq_rel);
895  __glibcxx_assert(__b2 <= __b1);
896 
897  return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
898  int(__m1), int(__m2));
899  }
900 
901 #if __cpp_lib_atomic_wait
902  _GLIBCXX_ALWAYS_INLINE void
903  wait(__pointer_type __old,
904  memory_order __m = memory_order_seq_cst) noexcept
905  {
906  std::__atomic_wait(&_M_p, __old,
907  [__m, this, __old]()
908  { return this->load(__m) != __old; });
909  }
910 
911  // TODO add const volatile overload
912 
913  _GLIBCXX_ALWAYS_INLINE void
914  notify_one() const noexcept
915  { std::__atomic_notify(&_M_p, false); }
916 
917  // TODO add const volatile overload
918 
919  _GLIBCXX_ALWAYS_INLINE void
920  notify_all() const noexcept
921  { std::__atomic_notify(&_M_p, true); }
922 
923  // TODO add const volatile overload
924 #endif // __cpp_lib_atomic_wait
925 
926  _GLIBCXX_ALWAYS_INLINE __pointer_type
927  fetch_add(ptrdiff_t __d,
928  memory_order __m = memory_order_seq_cst) noexcept
929  { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }
930 
931  _GLIBCXX_ALWAYS_INLINE __pointer_type
932  fetch_add(ptrdiff_t __d,
933  memory_order __m = memory_order_seq_cst) volatile noexcept
934  { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }
935 
936  _GLIBCXX_ALWAYS_INLINE __pointer_type
937  fetch_sub(ptrdiff_t __d,
938  memory_order __m = memory_order_seq_cst) noexcept
939  { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
940 
941  _GLIBCXX_ALWAYS_INLINE __pointer_type
942  fetch_sub(ptrdiff_t __d,
943  memory_order __m = memory_order_seq_cst) volatile noexcept
944  { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
945  };
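  // Editorial usage sketch (not part of the original header): the pointer
  // specialization scales arithmetic by sizeof(_PTp) via _M_type_size, so
  // std::atomic<T*> behaves like ordinary pointer arithmetic.  `buf',
  // `cursor' and `claim_two' are illustrative names.
  //
  //   #include <atomic>
  //
  //   int buf[8];
  //   std::atomic<int*> cursor{buf};
  //
  //   int* claim_two()
  //   { return cursor.fetch_add(2); }   // advances by 2 * sizeof(int) bytes
  //                                     // and returns the previous pointer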
946 
947 #if __cplusplus > 201703L
948  // Implementation details of atomic_ref and atomic<floating-point>.
949  namespace __atomic_impl
950  {
951  // Remove volatile and create a non-deduced context for value arguments.
952  template<typename _Tp>
953  using _Val = remove_volatile_t<_Tp>;
954 
955  // As above, but for difference_type arguments.
956  template<typename _Tp>
957  using _Diff = conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
958 
959  template<size_t _Size, size_t _Align>
960  _GLIBCXX_ALWAYS_INLINE bool
961  is_lock_free() noexcept
962  {
963  // Produce a fake, minimally aligned pointer.
964  return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
965  }
966 
967  template<typename _Tp>
968  _GLIBCXX_ALWAYS_INLINE void
969  store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
970  { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }
971 
972  template<typename _Tp>
973  _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
974  load(const _Tp* __ptr, memory_order __m) noexcept
975  {
976  alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
977  auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
978  __atomic_load(__ptr, __dest, int(__m));
979  return *__dest;
980  }
981 
982  template<typename _Tp>
983  _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
984  exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
985  {
986  alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
987  auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
988  __atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
989  return *__dest;
990  }
991 
992  template<typename _Tp>
993  _GLIBCXX_ALWAYS_INLINE bool
994  compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
995  _Val<_Tp> __desired, memory_order __success,
996  memory_order __failure) noexcept
997  {
998  return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
999  std::__addressof(__desired), true,
1000  int(__success), int(__failure));
1001  }
1002 
1003  template<typename _Tp>
1004  _GLIBCXX_ALWAYS_INLINE bool
1005  compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
1006  _Val<_Tp> __desired, memory_order __success,
1007  memory_order __failure) noexcept
1008  {
1009  return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
1010  std::__addressof(__desired), false,
1011  int(__success), int(__failure));
1012  }
1013 
1014 #if __cpp_lib_atomic_wait
1015  template<typename _Tp>
1016  _GLIBCXX_ALWAYS_INLINE void
1017  wait(const _Tp* __ptr, _Val<_Tp> __old,
1018  memory_order __m = memory_order_seq_cst) noexcept
1019  {
1020  std::__atomic_wait(__ptr, __old,
1021  [=]() { return load(__ptr, __m) == __old; });
1022  }
1023 
1024  // TODO add const volatile overload
1025 
1026  template<typename _Tp>
1027  _GLIBCXX_ALWAYS_INLINE void
1028  notify_one(const _Tp* __ptr) noexcept
1029  { std::__atomic_notify(__ptr, false); }
1030 
1031  // TODO add const volatile overload
1032 
1033  template<typename _Tp>
1034  _GLIBCXX_ALWAYS_INLINE void
1035  notify_all(const _Tp* __ptr) noexcept
1036  { std::__atomic_notify(__ptr, true); }
1037 
1038  // TODO add const volatile overload
1039 #endif // __cpp_lib_atomic_wait
1040 
1041  template<typename _Tp>
1042  _GLIBCXX_ALWAYS_INLINE _Tp
1043  fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1044  { return __atomic_fetch_add(__ptr, __i, int(__m)); }
1045 
1046  template<typename _Tp>
1047  _GLIBCXX_ALWAYS_INLINE _Tp
1048  fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1049  { return __atomic_fetch_sub(__ptr, __i, int(__m)); }
1050 
1051  template<typename _Tp>
1052  _GLIBCXX_ALWAYS_INLINE _Tp
1053  fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1054  { return __atomic_fetch_and(__ptr, __i, int(__m)); }
1055 
1056  template<typename _Tp>
1057  _GLIBCXX_ALWAYS_INLINE _Tp
1058  fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1059  { return __atomic_fetch_or(__ptr, __i, int(__m)); }
1060 
1061  template<typename _Tp>
1062  _GLIBCXX_ALWAYS_INLINE _Tp
1063  fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1064  { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
1065 
1066  template<typename _Tp>
1067  _GLIBCXX_ALWAYS_INLINE _Tp
1068  __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1069  { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1070 
1071  template<typename _Tp>
1072  _GLIBCXX_ALWAYS_INLINE _Tp
1073  __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1074  { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1075 
1076  template<typename _Tp>
1077  _GLIBCXX_ALWAYS_INLINE _Tp
1078  __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1079  { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1080 
1081  template<typename _Tp>
1082  _GLIBCXX_ALWAYS_INLINE _Tp
1083  __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1084  { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1085 
1086  template<typename _Tp>
1087  _GLIBCXX_ALWAYS_INLINE _Tp
1088  __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1089  { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1090 
1091  template<typename _Tp>
1092  _Tp
1093  __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1094  {
1095  _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1096  _Val<_Tp> __newval = __oldval + __i;
1097  while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
1098  memory_order_relaxed))
1099  __newval = __oldval + __i;
1100  return __oldval;
1101  }
1102 
1103  template<typename _Tp>
1104  _Tp
1105  __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1106  {
1107  _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1108  _Val<_Tp> __newval = __oldval - __i;
1109  while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
1110  memory_order_relaxed))
1111  __newval = __oldval - __i;
1112  return __oldval;
1113  }
1114 
1115  template<typename _Tp>
1116  _Tp
1117  __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1118  {
1119  _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1120  _Val<_Tp> __newval = __oldval + __i;
1121  while (!compare_exchange_weak(__ptr, __oldval, __newval,
1122  memory_order_seq_cst,
1123  memory_order_relaxed))
1124  __newval = __oldval + __i;
1125  return __newval;
1126  }
1127 
1128  template<typename _Tp>
1129  _Tp
1130  __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1131  {
1132  _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1133  _Val<_Tp> __newval = __oldval - __i;
1134  while (!compare_exchange_weak(__ptr, __oldval, __newval,
1135  memory_order_seq_cst,
1136  memory_order_relaxed))
1137  __newval = __oldval - __i;
1138  return __newval;
1139  }
1140  } // namespace __atomic_impl
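  // Editorial sketch (not part of the original header): the floating-point
  // helpers above (__fetch_add_flt and friends) use a compare-exchange loop
  // because there is no native __atomic_fetch_add for floating types.  The
  // same pattern can be written against the public API; `fetch_multiply' is
  // a hypothetical helper shown only to illustrate the loop.
  //
  //   #include <atomic>
  //
  //   double fetch_multiply(std::atomic<double>& a, double factor)
  //   {
  //     double oldval = a.load(std::memory_order_relaxed);
  //     // retry until no other thread modified the value in between;
  //     // on failure, oldval is reloaded with the current value
  //     while (!a.compare_exchange_weak(oldval, oldval * factor,
  //                                     std::memory_order_seq_cst,
  //                                     std::memory_order_relaxed))
  //       ;
  //     return oldval;
  //   }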
1141 
1142  // base class for atomic<floating-point-type>
1143  template<typename _Fp>
1144  struct __atomic_float
1145  {
1146  static_assert(is_floating_point_v<_Fp>);
1147 
1148  static constexpr size_t _S_alignment = __alignof__(_Fp);
1149 
1150  public:
1151  using value_type = _Fp;
1152  using difference_type = value_type;
1153 
1154  static constexpr bool is_always_lock_free
1155  = __atomic_always_lock_free(sizeof(_Fp), 0);
1156 
1157  __atomic_float() = default;
1158 
1159  constexpr
1160  __atomic_float(_Fp __t) : _M_fp(__t)
1161  { }
1162 
1163  __atomic_float(const __atomic_float&) = delete;
1164  __atomic_float& operator=(const __atomic_float&) = delete;
1165  __atomic_float& operator=(const __atomic_float&) volatile = delete;
1166 
1167  _Fp
1168  operator=(_Fp __t) volatile noexcept
1169  {
1170  this->store(__t);
1171  return __t;
1172  }
1173 
1174  _Fp
1175  operator=(_Fp __t) noexcept
1176  {
1177  this->store(__t);
1178  return __t;
1179  }
1180 
1181  bool
1182  is_lock_free() const volatile noexcept
1183  { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1184 
1185  bool
1186  is_lock_free() const noexcept
1187  { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1188 
1189  void
1190  store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
1191  { __atomic_impl::store(&_M_fp, __t, __m); }
1192 
1193  void
1194  store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
1195  { __atomic_impl::store(&_M_fp, __t, __m); }
1196 
1197  _Fp
1198  load(memory_order __m = memory_order_seq_cst) const volatile noexcept
1199  { return __atomic_impl::load(&_M_fp, __m); }
1200 
1201  _Fp
1202  load(memory_order __m = memory_order_seq_cst) const noexcept
1203  { return __atomic_impl::load(&_M_fp, __m); }
1204 
1205  operator _Fp() const volatile noexcept { return this->load(); }
1206  operator _Fp() const noexcept { return this->load(); }
1207 
1208  _Fp
1209  exchange(_Fp __desired,
1210  memory_order __m = memory_order_seq_cst) volatile noexcept
1211  { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1212 
1213  _Fp
1214  exchange(_Fp __desired,
1215  memory_order __m = memory_order_seq_cst) noexcept
1216  { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1217 
1218  bool
1219  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1220  memory_order __success,
1221  memory_order __failure) noexcept
1222  {
1223  return __atomic_impl::compare_exchange_weak(&_M_fp,
1224  __expected, __desired,
1225  __success, __failure);
1226  }
1227 
1228  bool
1229  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1230  memory_order __success,
1231  memory_order __failure) volatile noexcept
1232  {
1233  return __atomic_impl::compare_exchange_weak(&_M_fp,
1234  __expected, __desired,
1235  __success, __failure);
1236  }
1237 
1238  bool
1239  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1240  memory_order __success,
1241  memory_order __failure) noexcept
1242  {
1243  return __atomic_impl::compare_exchange_strong(&_M_fp,
1244  __expected, __desired,
1245  __success, __failure);
1246  }
1247 
1248  bool
1249  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1250  memory_order __success,
1251  memory_order __failure) volatile noexcept
1252  {
1253  return __atomic_impl::compare_exchange_strong(&_M_fp,
1254  __expected, __desired,
1255  __success, __failure);
1256  }
1257 
1258  bool
1259  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1260  memory_order __order = memory_order_seq_cst)
1261  noexcept
1262  {
1263  return compare_exchange_weak(__expected, __desired, __order,
1264  __cmpexch_failure_order(__order));
1265  }
1266 
1267  bool
1268  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1269  memory_order __order = memory_order_seq_cst)
1270  volatile noexcept
1271  {
1272  return compare_exchange_weak(__expected, __desired, __order,
1273  __cmpexch_failure_order(__order));
1274  }
1275 
1276  bool
1277  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1278  memory_order __order = memory_order_seq_cst)
1279  noexcept
1280  {
1281  return compare_exchange_strong(__expected, __desired, __order,
1282  __cmpexch_failure_order(__order));
1283  }
1284 
1285  bool
1286  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1287  memory_order __order = memory_order_seq_cst)
1288  volatile noexcept
1289  {
1290  return compare_exchange_strong(__expected, __desired, __order,
1291  __cmpexch_failure_order(__order));
1292  }
1293 
1294 #if __cpp_lib_atomic_wait
1295  _GLIBCXX_ALWAYS_INLINE void
1296  wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
1297  { __atomic_impl::wait(&_M_fp, __old, __m); }
1298 
1299  // TODO add const volatile overload
1300 
1301  _GLIBCXX_ALWAYS_INLINE void
1302  notify_one() const noexcept
1303  { __atomic_impl::notify_one(&_M_fp); }
1304 
1305  // TODO add const volatile overload
1306 
1307  _GLIBCXX_ALWAYS_INLINE void
1308  notify_all() const noexcept
1309  { __atomic_impl::notify_all(&_M_fp); }
1310 
1311  // TODO add const volatile overload
1312 #endif // __cpp_lib_atomic_wait
1313 
1314  value_type
1315  fetch_add(value_type __i,
1316  memory_order __m = memory_order_seq_cst) noexcept
1317  { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1318 
1319  value_type
1320  fetch_add(value_type __i,
1321  memory_order __m = memory_order_seq_cst) volatile noexcept
1322  { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1323 
1324  value_type
1325  fetch_sub(value_type __i,
1326  memory_order __m = memory_order_seq_cst) noexcept
1327  { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1328 
1329  value_type
1330  fetch_sub(value_type __i,
1331  memory_order __m = memory_order_seq_cst) volatile noexcept
1332  { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1333 
1334  value_type
1335  operator+=(value_type __i) noexcept
1336  { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1337 
1338  value_type
1339  operator+=(value_type __i) volatile noexcept
1340  { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1341 
1342  value_type
1343  operator-=(value_type __i) noexcept
1344  { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1345 
1346  value_type
1347  operator-=(value_type __i) volatile noexcept
1348  { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1349 
1350  private:
1351  alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
1352  };
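  // Editorial usage sketch (not part of the original header): with the C++20
  // __atomic_float base, std::atomic<double> gains fetch_add/fetch_sub and
  // the += / -= operators.  `total' and `accumulate' are illustrative names.
  //
  //   #include <atomic>
  //
  //   std::atomic<double> total{0.0};
  //
  //   void accumulate(double x)
  //   { total.fetch_add(x); }           // seq_cst by default; implemented
  //                                     // via the CAS loop shown above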
1353 #undef _GLIBCXX20_INIT
1354 
1355  template<typename _Tp,
1356  bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
1357  struct __atomic_ref;
1358 
1359  // base class for non-integral, non-floating-point, non-pointer types
1360  template<typename _Tp>
1361  struct __atomic_ref<_Tp, false, false>
1362  {
1363  static_assert(is_trivially_copyable_v<_Tp>);
1364 
1365  // 1/2/4/8/16-byte types must be aligned to at least their size.
1366  static constexpr int _S_min_alignment
1367  = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
1368  ? 0 : sizeof(_Tp);
1369 
1370  public:
1371  using value_type = _Tp;
1372 
1373  static constexpr bool is_always_lock_free
1374  = __atomic_always_lock_free(sizeof(_Tp), 0);
1375 
1376  static constexpr size_t required_alignment
1377  = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);
1378 
1379  __atomic_ref& operator=(const __atomic_ref&) = delete;
1380 
1381  explicit
1382  __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
1383  { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1384 
1385  __atomic_ref(const __atomic_ref&) noexcept = default;
1386 
1387  _Tp
1388  operator=(_Tp __t) const noexcept
1389  {
1390  this->store(__t);
1391  return __t;
1392  }
1393 
1394  operator _Tp() const noexcept { return this->load(); }
1395 
1396  bool
1397  is_lock_free() const noexcept
1398  { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
1399 
1400  void
1401  store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
1402  { __atomic_impl::store(_M_ptr, __t, __m); }
1403 
1404  _Tp
1405  load(memory_order __m = memory_order_seq_cst) const noexcept
1406  { return __atomic_impl::load(_M_ptr, __m); }
1407 
1408  _Tp
1409  exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
1410  const noexcept
1411  { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1412 
1413  bool
1414  compare_exchange_weak(_Tp& __expected, _Tp __desired,
1415  memory_order __success,
1416  memory_order __failure) const noexcept
1417  {
1418  return __atomic_impl::compare_exchange_weak(_M_ptr,
1419  __expected, __desired,
1420  __success, __failure);
1421  }
1422 
1423  bool
1424  compare_exchange_strong(_Tp& __expected, _Tp __desired,
1425  memory_order __success,
1426  memory_order __failure) const noexcept
1427  {
1428  return __atomic_impl::compare_exchange_strong(_M_ptr,
1429  __expected, __desired,
1430  __success, __failure);
1431  }
1432 
1433  bool
1434  compare_exchange_weak(_Tp& __expected, _Tp __desired,
1435  memory_order __order = memory_order_seq_cst)
1436  const noexcept
1437  {
1438  return compare_exchange_weak(__expected, __desired, __order,
1439  __cmpexch_failure_order(__order));
1440  }
1441 
1442  bool
1443  compare_exchange_strong(_Tp& __expected, _Tp __desired,
1444  memory_order __order = memory_order_seq_cst)
1445  const noexcept
1446  {
1447  return compare_exchange_strong(__expected, __desired, __order,
1448  __cmpexch_failure_order(__order));
1449  }
1450 
1451 #if __cpp_lib_atomic_wait
1452  _GLIBCXX_ALWAYS_INLINE void
1453  wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
1454  { __atomic_impl::wait(_M_ptr, __old, __m); }
1455 
1456  // TODO add const volatile overload
1457 
1458  _GLIBCXX_ALWAYS_INLINE void
1459  notify_one() const noexcept
1460  { __atomic_impl::notify_one(_M_ptr); }
1461 
1462  // TODO add const volatile overload
1463 
1464  _GLIBCXX_ALWAYS_INLINE void
1465  notify_all() const noexcept
1466  { __atomic_impl::notify_all(_M_ptr); }
1467 
1468  // TODO add const volatile overload
1469 #endif // __cpp_lib_atomic_wait
1470 
1471  private:
1472  _Tp* _M_ptr;
1473  };
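  // Editorial usage sketch (not part of the original header): std::atomic_ref
  // applied to a plain trivially-copyable object.  The referenced object must
  // satisfy atomic_ref<T>::required_alignment (here 8, since the type is
  // 8 bytes) and must only be accessed through atomic_ref while shared.
  // `Point' and `move_right' are illustrative names.
  //
  //   #include <atomic>
  //
  //   struct alignas(8) Point { int x, y; };   // trivially copyable
  //
  //   void move_right(Point& shared)
  //   {
  //     std::atomic_ref<Point> ref(shared);
  //     Point expected = ref.load();
  //     Point desired{expected.x + 1, expected.y};
  //     // expected is refreshed on each failed attempt
  //     while (!ref.compare_exchange_weak(expected, desired))
  //       desired = Point{expected.x + 1, expected.y};
  //   }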
1474 
1475  // base class for atomic_ref<integral-type>
1476  template<typename _Tp>
1477  struct __atomic_ref<_Tp, true, false>
1478  {
1479  static_assert(is_integral_v<_Tp>);
1480 
1481  public:
1482  using value_type = _Tp;
1483  using difference_type = value_type;
1484 
1485  static constexpr bool is_always_lock_free
1486  = __atomic_always_lock_free(sizeof(_Tp), 0);
1487 
1488  static constexpr size_t required_alignment
1489  = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);
1490 
1491  __atomic_ref() = delete;
1492  __atomic_ref& operator=(const __atomic_ref&) = delete;
1493 
1494  explicit
1495  __atomic_ref(_Tp& __t) : _M_ptr(&__t)
1496  { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1497 
1498  __atomic_ref(const __atomic_ref&) noexcept = default;
1499 
1500  _Tp
1501  operator=(_Tp __t) const noexcept
1502  {
1503  this->store(__t);
1504  return __t;
1505  }
1506 
1507  operator _Tp() const noexcept { return this->load(); }
1508 
1509  bool
1510  is_lock_free() const noexcept
1511  {
1512  return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
1513  }
1514 
1515  void
1516  store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
1517  { __atomic_impl::store(_M_ptr, __t, __m); }
1518 
1519  _Tp
1520  load(memory_order __m = memory_order_seq_cst) const noexcept
1521  { return __atomic_impl::load(_M_ptr, __m); }
1522 
1523  _Tp
1524  exchange(_Tp __desired,
1525  memory_order __m = memory_order_seq_cst) const noexcept
1526  { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1527 
1528  bool
1529  compare_exchange_weak(_Tp& __expected, _Tp __desired,
1530  memory_order __success,
1531  memory_order __failure) const noexcept
1532  {
1533  return __atomic_impl::compare_exchange_weak(_M_ptr,
1534  __expected, __desired,
1535  __success, __failure);
1536  }
1537 
1538  bool
1539  compare_exchange_strong(_Tp& __expected, _Tp __desired,
1540  memory_order __success,
1541  memory_order __failure) const noexcept
1542  {
1543  return __atomic_impl::compare_exchange_strong(_M_ptr,
1544  __expected, __desired,
1545  __success, __failure);
1546  }
1547 
1548  bool
1549  compare_exchange_weak(_Tp& __expected, _Tp __desired,
1550  memory_order __order = memory_order_seq_cst)
1551  const noexcept
1552  {
1553  return compare_exchange_weak(__expected, __desired, __order,
1554  __cmpexch_failure_order(__order));
1555  }
1556 
1557  bool
1558  compare_exchange_strong(_Tp& __expected, _Tp __desired,
1559  memory_order __order = memory_order_seq_cst)
1560  const noexcept
1561  {
1562  return compare_exchange_strong(__expected, __desired, __order,
1563  __cmpexch_failure_order(__order));
1564  }
1565 
1566 #if __cpp_lib_atomic_wait
1567  _GLIBCXX_ALWAYS_INLINE void
1568  wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
1569  { __atomic_impl::wait(_M_ptr, __old, __m); }
1570 
1571  // TODO add const volatile overload
1572 
1573  _GLIBCXX_ALWAYS_INLINE void
1574  notify_one() const noexcept
1575  { __atomic_impl::notify_one(_M_ptr); }
1576 
1577  // TODO add const volatile overload
1578 
1579  _GLIBCXX_ALWAYS_INLINE void
1580  notify_all() const noexcept
1581  { __atomic_impl::notify_all(_M_ptr); }
1582 
1583  // TODO add const volatile overload
1584 #endif // __cpp_lib_atomic_wait
1585 
1586  value_type
1587  fetch_add(value_type __i,
1588  memory_order __m = memory_order_seq_cst) const noexcept
1589  { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }
1590 
1591  value_type
1592  fetch_sub(value_type __i,
1593  memory_order __m = memory_order_seq_cst) const noexcept
1594  { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }
1595 
1596  value_type
1597  fetch_and(value_type __i,
1598  memory_order __m = memory_order_seq_cst) const noexcept
1599  { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }
1600 
1601  value_type
1602  fetch_or(value_type __i,
1603  memory_order __m = memory_order_seq_cst) const noexcept
1604  { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }
1605 
1606  value_type
1607  fetch_xor(value_type __i,
1608  memory_order __m = memory_order_seq_cst) const noexcept
1609  { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }
1610 
1611  _GLIBCXX_ALWAYS_INLINE value_type
1612  operator++(int) const noexcept
1613  { return fetch_add(1); }
1614 
1615  _GLIBCXX_ALWAYS_INLINE value_type
1616  operator--(int) const noexcept
1617  { return fetch_sub(1); }
1618 
1619  value_type
1620  operator++() const noexcept
1621  { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }
1622 
1623  value_type
1624  operator--() const noexcept
1625  { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }
1626 
1627  value_type
1628  operator+=(value_type __i) const noexcept
1629  { return __atomic_impl::__add_fetch(_M_ptr, __i); }
1630 
1631  value_type
1632  operator-=(value_type __i) const noexcept
1633  { return __atomic_impl::__sub_fetch(_M_ptr, __i); }
1634 
1635  value_type
1636  operator&=(value_type __i) const noexcept
1637  { return __atomic_impl::__and_fetch(_M_ptr, __i); }
1638 
1639  value_type
1640  operator|=(value_type __i) const noexcept
1641  { return __atomic_impl::__or_fetch(_M_ptr, __i); }
1642 
1643  value_type
1644  operator^=(value_type __i) const noexcept
1645  { return __atomic_impl::__xor_fetch(_M_ptr, __i); }
1646 
1647  private:
1648  _Tp* _M_ptr;
1649  };
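  // Editorial usage sketch (not part of the original header): atomic_ref over
  // an ordinary int, giving it the full integral atomic interface for the
  // lifetime of the reference.  `hits' and `count_hit' are illustrative names.
  //
  //   #include <atomic>
  //
  //   void count_hit(int& hits)         // `hits' may live in a plain array
  //   {
  //     std::atomic_ref<int> r(hits);
  //     r.fetch_add(1, std::memory_order_relaxed);
  //   }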
1650 
1651  // base class for atomic_ref<floating-point-type>
1652  template<typename _Fp>
1653  struct __atomic_ref<_Fp, false, true>
1654  {
1655  static_assert(is_floating_point_v<_Fp>);
1656 
1657  public:
1658  using value_type = _Fp;
1659  using difference_type = value_type;
1660 
1661  static constexpr bool is_always_lock_free
1662  = __atomic_always_lock_free(sizeof(_Fp), 0);
1663 
1664  static constexpr size_t required_alignment = __alignof__(_Fp);
1665 
1666  __atomic_ref() = delete;
1667  __atomic_ref& operator=(const __atomic_ref&) = delete;
1668 
1669  explicit
1670  __atomic_ref(_Fp& __t) : _M_ptr(&__t)
1671  { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1672 
1673  __atomic_ref(const __atomic_ref&) noexcept = default;
1674 
1675  _Fp
1676  operator=(_Fp __t) const noexcept
1677  {
1678  this->store(__t);
1679  return __t;
1680  }
1681 
1682  operator _Fp() const noexcept { return this->load(); }
1683 
1684  bool
1685  is_lock_free() const noexcept
1686  {
1687  return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
1688  }
1689 
1690  void
1691  store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
1692  { __atomic_impl::store(_M_ptr, __t, __m); }
1693 
1694  _Fp
1695  load(memory_order __m = memory_order_seq_cst) const noexcept
1696  { return __atomic_impl::load(_M_ptr, __m); }
1697 
1698  _Fp
1699  exchange(_Fp __desired,
1700  memory_order __m = memory_order_seq_cst) const noexcept
1701  { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1702 
1703  bool
1704  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1705  memory_order __success,
1706  memory_order __failure) const noexcept
1707  {
1708  return __atomic_impl::compare_exchange_weak(_M_ptr,
1709  __expected, __desired,
1710  __success, __failure);
1711  }
1712 
1713  bool
1714  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1715  memory_order __success,
1716  memory_order __failure) const noexcept
1717  {
1718  return __atomic_impl::compare_exchange_strong(_M_ptr,
1719  __expected, __desired,
1720  __success, __failure);
1721  }
1722 
1723  bool
1724  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1725  memory_order __order = memory_order_seq_cst)
1726  const noexcept
1727  {
1728  return compare_exchange_weak(__expected, __desired, __order,
1729  __cmpexch_failure_order(__order));
1730  }
1731 
1732  bool
1733  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1734  memory_order __order = memory_order_seq_cst)
1735  const noexcept
1736  {
1737  return compare_exchange_strong(__expected, __desired, __order,
1738  __cmpexch_failure_order(__order));
1739  }
1740 
1741 #if __cpp_lib_atomic_wait
1742  _GLIBCXX_ALWAYS_INLINE void
1743  wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
1744  { __atomic_impl::wait(_M_ptr, __old, __m); }
1745 
1746  // TODO add const volatile overload
1747 
1748  _GLIBCXX_ALWAYS_INLINE void
1749  notify_one() const noexcept
1750  { __atomic_impl::notify_one(_M_ptr); }
1751 
1752  // TODO add const volatile overload
1753 
1754  _GLIBCXX_ALWAYS_INLINE void
1755  notify_all() const noexcept
1756  { __atomic_impl::notify_all(_M_ptr); }
1757 
1758  // TODO add const volatile overload
1759 #endif // __cpp_lib_atomic_wait
1760 
1761  value_type
1762  fetch_add(value_type __i,
1763  memory_order __m = memory_order_seq_cst) const noexcept
1764  { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }
1765 
1766  value_type
1767  fetch_sub(value_type __i,
1768  memory_order __m = memory_order_seq_cst) const noexcept
1769  { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }
1770 
1771  value_type
1772  operator+=(value_type __i) const noexcept
1773  { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }
1774 
1775  value_type
1776  operator-=(value_type __i) const noexcept
1777  { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }
1778 
1779  private:
1780  _Fp* _M_ptr;
1781  };
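  // Editorial usage sketch (not part of the original header): atomic_ref over
  // a double uses the same CAS-loop helpers as __atomic_float for += and
  // fetch_add.  `sum' and `add_sample' are illustrative names.
  //
  //   #include <atomic>
  //
  //   void add_sample(double& sum, double x)
  //   {
  //     std::atomic_ref<double> r(sum);
  //     r += x;                         // __add_fetch_flt under the hood
  //   }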
1782 
1783  // base class for atomic_ref<pointer-type>
1784  template<typename _Tp>
1785  struct __atomic_ref<_Tp*, false, false>
1786  {
1787  public:
1788  using value_type = _Tp*;
1789  using difference_type = ptrdiff_t;
1790 
1791  static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
1792 
1793  static constexpr size_t required_alignment = __alignof__(_Tp*);
1794 
1795  __atomic_ref() = delete;
1796  __atomic_ref& operator=(const __atomic_ref&) = delete;
1797 
1798  explicit
1799  __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
1800  { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
1801 
1802  __atomic_ref(const __atomic_ref&) noexcept = default;
1803 
1804  _Tp*
1805  operator=(_Tp* __t) const noexcept
1806  {
1807  this->store(__t);
1808  return __t;
1809  }
1810 
1811  operator _Tp*() const noexcept { return this->load(); }
1812 
1813  bool
1814  is_lock_free() const noexcept
1815  {
1816  return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
1817  }
1818 
1819  void
1820  store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
1821  { __atomic_impl::store(_M_ptr, __t, __m); }
1822 
1823  _Tp*
1824  load(memory_order __m = memory_order_seq_cst) const noexcept
1825  { return __atomic_impl::load(_M_ptr, __m); }
1826 
1827  _Tp*
1828  exchange(_Tp* __desired,
1829  memory_order __m = memory_order_seq_cst) const noexcept
1830  { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1831 
1832  bool
1833  compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1834  memory_order __success,
1835  memory_order __failure) const noexcept
1836  {
1837  return __atomic_impl::compare_exchange_weak(_M_ptr,
1838  __expected, __desired,
1839  __success, __failure);
1840  }
1841 
1842  bool
1843  compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
1844  memory_order __success,
1845  memory_order __failure) const noexcept
1846  {
1847  return __atomic_impl::compare_exchange_strong(_M_ptr,
1848  __expected, __desired,
1849  __success, __failure);
1850  }
1851 
1852  bool
1853  compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1854  memory_order __order = memory_order_seq_cst)
1855  const noexcept
1856  {
1857  return compare_exchange_weak(__expected, __desired, __order,
1858  __cmpexch_failure_order(__order));
1859  }
1860 
1861  bool
1862  compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
1863  memory_order __order = memory_order_seq_cst)
1864  const noexcept
1865  {
1866  return compare_exchange_strong(__expected, __desired, __order,
1867  __cmpexch_failure_order(__order));
1868  }
1869 
1870 #if __cpp_lib_atomic_wait
1871  _GLIBCXX_ALWAYS_INLINE void
1872  wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
1873  { __atomic_impl::wait(_M_ptr, __old, __m); }
1874 
1875  // TODO add const volatile overload
1876 
1877  _GLIBCXX_ALWAYS_INLINE void
1878  notify_one() const noexcept
1879  { __atomic_impl::notify_one(_M_ptr); }
1880 
1881  // TODO add const volatile overload
1882 
1883  _GLIBCXX_ALWAYS_INLINE void
1884  notify_all() const noexcept
1885  { __atomic_impl::notify_all(_M_ptr); }
1886 
1887  // TODO add const volatile overload
1888 #endif // __cpp_lib_atomic_wait
1889 
1890  _GLIBCXX_ALWAYS_INLINE value_type
1891  fetch_add(difference_type __d,
1892  memory_order __m = memory_order_seq_cst) const noexcept
1893  { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }
1894 
1895  _GLIBCXX_ALWAYS_INLINE value_type
1896  fetch_sub(difference_type __d,
1897  memory_order __m = memory_order_seq_cst) const noexcept
1898  { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }
1899 
1900  value_type
1901  operator++(int) const noexcept
1902  { return fetch_add(1); }
1903 
1904  value_type
1905  operator--(int) const noexcept
1906  { return fetch_sub(1); }
1907 
1908  value_type
1909  operator++() const noexcept
1910  {
1911  return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
1912  }
1913 
1914  value_type
1915  operator--() const noexcept
1916  {
1917  return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
1918  }
1919 
1920  value_type
1921  operator+=(difference_type __d) const noexcept
1922  {
1923  return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
1924  }
1925 
1926  value_type
1927  operator-=(difference_type __d) const noexcept
1928  {
1929  return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
1930  }
1931 
1932  private:
1933  static constexpr ptrdiff_t
1934  _S_type_size(ptrdiff_t __d) noexcept
1935  {
1936  static_assert(is_object_v<_Tp>);
1937  return __d * sizeof(_Tp);
1938  }
1939 
1940  _Tp** _M_ptr;
1941  };
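  // Editorial usage sketch (not part of the original header): atomic_ref over
  // a pointer scales fetch_add/fetch_sub by sizeof(_Tp) via _S_type_size,
  // just like the atomic<T*> specialization.  `head' and `pop_two' are
  // illustrative names.
  //
  //   #include <atomic>
  //
  //   int* pop_two(int*& head)
  //   {
  //     std::atomic_ref<int*> r(head);
  //     return r.fetch_add(2);          // advances head by 2 ints and
  //   }                                 // returns the previous value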
1942 
1943 #endif // C++2a
1944 
1945  /// @} group atomics
1946 
1947 _GLIBCXX_END_NAMESPACE_VERSION
1948 } // namespace std
1949 
1950 #endif