#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>
#include <bits/move.h>

#if __cplusplus > 201703L
#include <bits/atomic_wait.h>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /// Enumeration for memory_order
#if __cplusplus > 201703L
  enum class memory_order : int
    { relaxed, consume, acquire, release, acq_rel, seq_cst };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  typedef enum memory_order
    { memory_order_relaxed, memory_order_consume, memory_order_acquire,
      memory_order_release, memory_order_acq_rel, memory_order_seq_cst
    } memory_order;
#endif
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  { return memory_order(int(__m) | int(__mod)); }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  { return memory_order(int(__m) & int(__mod)); }

  // Drop release ordering as per [atomics.types.operations.req]/21.
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }
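  // Illustrative sketch (not part of this header): the fences above are the
  // library entry points behind std::atomic_thread_fence.  A typical use is
  // pairing a release fence in a producer with an acquire fence in a
  // consumer, together with relaxed atomic accesses.  The names `payload`,
  // `ready`, `producer` and `consumer` below are invented for the example.
  //
  //   #include <atomic>
  //
  //   int payload;                         // plain, non-atomic data
  //   std::atomic<bool> ready{false};
  //
  //   void producer()
  //   {
  //     payload = 42;
  //     std::atomic_thread_fence(std::memory_order_release);
  //     ready.store(true, std::memory_order_relaxed);
  //   }
  //
  //   void consumer()
  //   {
  //     while (!ready.load(std::memory_order_relaxed))
  //       { /* spin */ }
  //     std::atomic_thread_fence(std::memory_order_acquire);
  //     int v = payload;                   // guaranteed to observe 42
  //   }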
  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

  // Base class template for atomic integral types (defined below).
  template<typename _IntTp>
    struct __atomic_base;

#if __cplusplus <= 201703L
# define _GLIBCXX20_INIT(I)
#else
# define __cpp_lib_atomic_value_initialization 201911L
# define _GLIBCXX20_INIT(I) = I
#endif

#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  /// Base type for atomic_flag.
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

#if __cplusplus > 201703L
#define __cpp_lib_atomic_flag_test 201907L

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

#if __cpp_lib_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old, memory_order __m = memory_order_seq_cst) const noexcept
    {
      std::__atomic_wait(&_M_i,
                         static_cast<__atomic_flag_data_type>(__old),
                         [__m, this, __old]()
                         { return this->test(__m) != __old; });
    }

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() const noexcept
    { std::__atomic_notify(&_M_i, false); }

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() const noexcept
    { std::__atomic_notify(&_M_i, true); }
#endif // __cpp_lib_atomic_wait
#endif // C++20

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
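  // Illustrative sketch (not part of this header): atomic_flag is the
  // building block for a minimal spinlock.  test_and_set(acquire) claims the
  // lock, clear(release) publishes the critical section and releases it.
  // `spinlock` and `guarded_work` are invented names for the example.
  //
  //   #include <atomic>
  //
  //   std::atomic_flag spinlock = ATOMIC_FLAG_INIT;
  //
  //   void guarded_work()
  //   {
  //     while (spinlock.test_and_set(std::memory_order_acquire))
  //       { /* busy-wait; C++20 code could use spinlock.wait(true) instead */ }
  //     // ... critical section ...
  //     spinlock.clear(std::memory_order_release);
  //   }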
  /// Base class for atomic integrals.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 __attribute__ ((__unused__))
          = __m2 & __memory_order_mask;
        memory_order __b1 __attribute__ ((__unused__))
          = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        memory_order __b2 __attribute__ ((__unused__))
          = __m2 & __memory_order_mask;
        memory_order __b1 __attribute__ ((__unused__))
          = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 __attribute__ ((__unused__))
          = __m2 & __memory_order_mask;
        memory_order __b1 __attribute__ ((__unused__))
          = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 __attribute__ ((__unused__))
          = __m2 & __memory_order_mask;
        memory_order __b1 __attribute__ ((__unused__))
          = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }
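      // Illustrative sketch (not part of this header): compare_exchange_*
      // only stores the desired value when the current value equals the
      // expected one, and otherwise writes the observed value back into the
      // `expected` argument.  That makes it suitable for one-time claiming
      // of a slot; `claim_owner` and `owner` are invented names.
      //
      //   #include <atomic>
      //
      //   std::atomic<int> owner{0};      // 0 means "unclaimed"
      //
      //   bool claim_owner(int id)
      //   {
      //     int expected = 0;
      //     // Succeeds for exactly one caller; on failure `expected` holds
      //     // the id of the thread that won the race.
      //     return owner.compare_exchange_strong(expected, id,
      //                                          std::memory_order_acq_rel,
      //                                          std::memory_order_acquire);
      //   }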
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__int_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait(&_M_i, __old,
                           [__m, this, __old]
                           { return this->load(__m) != __old; });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify(&_M_i, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify(&_M_i, true); }
#endif // __cpp_lib_atomic_wait
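      // Illustrative sketch (not part of this header): wait() blocks while
      // the atomic still holds the old value, and notify_one()/notify_all()
      // wake the waiters; this is the machinery behind C++20
      // std::atomic::wait.  `done`, `waiter` and `finisher` are invented
      // names for the example.
      //
      //   #include <atomic>
      //
      //   std::atomic<int> done{0};
      //
      //   void waiter()
      //   {
      //     done.wait(0);                  // sleep until `done` != 0
      //   }
      //
      //   void finisher()
      //   {
      //     done.store(1, std::memory_order_release);
      //     done.notify_all();             // wake every waiter
      //   }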
      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
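  // Illustrative sketch (not part of this header): the fetch_* members map
  // directly onto the __atomic_fetch_* built-ins, so a statistics counter
  // that needs atomicity but no ordering can use memory_order_relaxed.
  // `hits` and `record_hit` are invented names for the example.
  //
  //   #include <atomic>
  //
  //   std::atomic<unsigned long> hits{0};
  //
  //   void record_hit()
  //   { hits.fetch_add(1, std::memory_order_relaxed); }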
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;

        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) noexcept
      {
        memory_order __b2 __attribute__ ((__unused__))
          = __m2 & __memory_order_mask;
        memory_order __b1 __attribute__ ((__unused__))
          = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 __attribute__ ((__unused__))
          = __m2 & __memory_order_mask;
        memory_order __b1 __attribute__ ((__unused__))
          = __m1 & __memory_order_mask;

        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait(&_M_p, __old,
                           [__m, this, __old]()
                           { return this->load(__m) != __old; });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify(&_M_p, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify(&_M_p, true); }
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };
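  // Illustrative sketch (not part of this header): fetch_add on an atomic
  // pointer advances it by whole elements (_M_type_size scales the count by
  // sizeof(_PTp)), which makes it handy for handing out slots of a shared
  // buffer.  `buffer`, `cursor` and `take_slot` are invented names, and
  // bounds checking is omitted for brevity.
  //
  //   #include <atomic>
  //
  //   int buffer[1024];
  //   std::atomic<int*> cursor{buffer};
  //
  //   int* take_slot()
  //   {
  //     // Each caller receives a pointer to a distinct element of `buffer`.
  //     return cursor.fetch_add(1, std::memory_order_relaxed);
  //   }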
#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = remove_volatile_t<_Tp>;

    // As above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;

    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_load(__ptr, __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_exchange(__ptr, std::__addressof(__desired), __dest,
                          int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
                            _Val<_Tp> __desired, memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), true,
                                         int(__success), int(__failure));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
                              _Val<_Tp> __desired, memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), false,
                                         int(__success), int(__failure));
      }

#if __cpp_lib_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old,
           memory_order __m = memory_order_seq_cst) noexcept
      {
        std::__atomic_wait(__ptr, __old,
                           [=]() { return load(__ptr, __m) == __old; });
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify(__ptr, false); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify(__ptr, true); }
#endif // __cpp_lib_atomic_wait

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    // There are no __atomic built-ins for floating-point arithmetic, so the
    // fetch-and-modify operations below are emulated with a CAS retry loop.
    template<typename _Tp>
      _Tp
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __newval;
      }

    template<typename _Tp>
      _Tp
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __newval;
      }
  } // namespace __atomic_impl
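  // Illustrative sketch (not part of this header): the same CAS retry-loop
  // pattern used by __fetch_add_flt above works for any read-modify-write
  // operation the hardware does not provide directly, for example an atomic
  // maximum.  `atomic_fetch_max` is an invented name for the example.
  //
  //   #include <atomic>
  //
  //   int atomic_fetch_max(std::atomic<int>& a, int value)
  //   {
  //     int old = a.load(std::memory_order_relaxed);
  //     while (old < value
  //            && !a.compare_exchange_weak(old, value,
  //                                        std::memory_order_acq_rel,
  //                                        std::memory_order_relaxed))
  //       { /* `old` was refreshed by the failed CAS; retry */ }
  //     return old;
  //   }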
  // base class for atomic<floating-point-type>
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
#undef _GLIBCXX20_INIT
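  // Illustrative sketch (not part of this header): in this implementation
  // __atomic_float is the base of C++20 std::atomic<double> and friends, so
  // fetch_add/operator+= on a floating-point atomic compile down to the CAS
  // loops defined in __atomic_impl.  `total` and `accumulate` are invented
  // names for the example.
  //
  //   #include <atomic>
  //
  //   std::atomic<double> total{0.0};
  //
  //   void accumulate(double sample)
  //   {
  //     total.fetch_add(sample);   // or: total += sample;
  //   }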
  // Matches the requirements of C++20 atomic_ref.
  template<typename _Tp,
           bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;

  // base class for non-integral, non-floating-point, non-pointer types
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
        = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
        ? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait

    private:
      _Tp* _M_ptr;
    };
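  // Illustrative sketch (not part of this header): the generic __atomic_ref
  // backs C++20 std::atomic_ref for trivially copyable class types, giving
  // atomic access to an object that is otherwise stored and used normally.
  // `Point` and `move_point` are invented names; the alignas keeps the
  // object aligned to required_alignment so the operations can be lock-free.
  //
  //   #include <atomic>
  //
  //   struct alignas(8) Point { int x; int y; };   // trivially copyable
  //
  //   void move_point(Point& shared)
  //   {
  //     std::atomic_ref<Point> ref(shared);
  //     Point expected = ref.load();
  //     Point desired;
  //     do {
  //       desired = { expected.x + 1, expected.y + 1 };
  //     } while (!ref.compare_exchange_weak(expected, desired));
  //   }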
  // base class for atomic_ref<integral-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

    public:
      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
      }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };
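  // Illustrative sketch (not part of this header): the integral
  // specialization is what std::atomic_ref<int> and friends use, so a plain
  // object can be updated atomically only where contention is expected.
  // `histogram` and `count_value` are invented names for the example.
  //
  //   #include <atomic>
  //
  //   void count_value(int (&histogram)[256], unsigned char value)
  //   {
  //     // Concurrent callers may update the same bucket safely.
  //     std::atomic_ref<int> bucket(histogram[value]);
  //     bucket.fetch_add(1, std::memory_order_relaxed);
  //   }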
  // base class for atomic_ref<floating-point-type>
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      operator _Fp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
      }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };
  // base class for atomic_ref<pointer-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
    public:
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free
        = ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      operator _Tp*() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
      }

      void
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp*
      exchange(_Tp* __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }

      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator--() const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator+=(difference_type __d) const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
      }

      value_type
      operator-=(difference_type __d) const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
      }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
        static_assert(is_object_v<_Tp>);
        return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };
#endif // C++20

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_BASE_H