libstdc++
shared_ptr_atomic.h
// shared_ptr atomic access -*- C++ -*-

// Copyright (C) 2014-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/shared_ptr_atomic.h
 * This is an internal header file, included by other library headers.
 * Do not attempt to use it directly. @headername{memory}
 */

#ifndef _SHARED_PTR_ATOMIC_H
#define _SHARED_PTR_ATOMIC_H 1

#include <bits/atomic_base.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
  /**
   * @addtogroup pointer_abstractions
   * @{
   */
  /// @relates shared_ptr @{

  /// @cond undocumented

  struct _Sp_locker
  {
    _Sp_locker(const _Sp_locker&) = delete;
    _Sp_locker& operator=(const _Sp_locker&) = delete;

#ifdef __GTHREADS
    explicit
    _Sp_locker(const void*) noexcept;
    _Sp_locker(const void*, const void*) noexcept;
    ~_Sp_locker();

  private:
    unsigned char _M_key1;
    unsigned char _M_key2;
#else
    explicit _Sp_locker(const void*, const void* = nullptr) { }
#endif
  };

  /// @endcond
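
  // The constructors above are defined in the compiled runtime, not in
  // this header.  As a rough sketch of the approach (the real pool size,
  // hash, and storage are implementation details of the runtime, and the
  // names below are invented for illustration): a small fixed pool of
  // mutexes is indexed by hashing the shared_ptr's address, and the
  // two-pointer constructor locks both keys in a fixed order so that two
  // threads passing the same pair in opposite orders cannot deadlock.
  // @code
  //   #include <mutex>
  //   #include <cstdint>
  //
  //   std::mutex __sp_mutex_pool[16];              // hypothetical pool
  //
  //   inline unsigned char
  //   __sp_mutex_key(const void* __p)              // hypothetical hash
  //   {
  //     auto __u = reinterpret_cast<std::uintptr_t>(__p);
  //     return static_cast<unsigned char>((__u >> 4) % 16);
  //   }
  // @endcode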

  /**
   * @brief Report whether shared_ptr atomic operations are lock-free.
   * @param __p A non-null pointer to a shared_ptr object.
   * @return True if atomic access to @c *__p is lock-free, false otherwise.
   * @{
   */
  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_is_lock_free(const __shared_ptr<_Tp, _Lp>* __p)
    {
#ifdef __GTHREADS
      return __gthread_active_p() == 0;
#else
      return true;
#endif
    }

  template<typename _Tp>
    inline bool
    atomic_is_lock_free(const shared_ptr<_Tp>* __p)
    { return std::atomic_is_lock_free<_Tp, __default_lock_policy>(__p); }

  /// @}
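
  // A usage sketch (the variable name is illustrative): when __GTHREADS
  // is active and threads are running, these overloads return false,
  // signalling that the atomic free functions below serialize through an
  // internal mutex rather than hardware atomics.
  // @code
  //   std::shared_ptr<int> __g = std::make_shared<int>(0);
  //   if (!std::atomic_is_lock_free(&__g))
  //     {
  //       // Accesses to __g via atomic_load/atomic_store may block;
  //       // C++20 code can use std::atomic<std::shared_ptr<int>> instead.
  //     }
  // @endcode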

  /**
   * @brief Atomic load for shared_ptr objects.
   * @param __p A non-null pointer to a shared_ptr object.
   * @return @c *__p
   *
   * The memory order shall not be @c memory_order_release or
   * @c memory_order_acq_rel.
   * @{
   */
  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_load_explicit(const shared_ptr<_Tp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_load(const shared_ptr<_Tp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_load_explicit(const __shared_ptr<_Tp, _Lp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_load(const __shared_ptr<_Tp, _Lp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }
  /// @}
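
  // A usage sketch (names are illustrative): atomic_load takes a
  // consistent snapshot of a shared_ptr that another thread may be
  // replacing concurrently, and the snapshot keeps its object alive.
  // @code
  //   std::shared_ptr<int> __global = std::make_shared<int>(42);
  //
  //   // Reader thread:
  //   std::shared_ptr<int> __snapshot = std::atomic_load(&__global);
  //   int __value = *__snapshot;   // safe even if __global is replaced
  // @endcode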

  /**
   * @brief Atomic store for shared_ptr objects.
   * @param __p A non-null pointer to a shared_ptr object.
   * @param __r The value to store.
   *
   * The memory order shall not be @c memory_order_acquire or
   * @c memory_order_acq_rel.
   * @{
   */
  template<typename _Tp>
    inline void
    atomic_store_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
                          memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // use swap so that **__p not destroyed while lock held
    }

  template<typename _Tp>
    inline void
    atomic_store(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    inline void
    atomic_store_explicit(__shared_ptr<_Tp, _Lp>* __p,
                          __shared_ptr<_Tp, _Lp> __r,
                          memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // use swap so that **__p not destroyed while lock held
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline void
    atomic_store(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }
  /// @}
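
  // A usage sketch (names are illustrative): atomic_store publishes a
  // replacement value; because the implementation swaps into a local,
  // the old object is destroyed after the internal lock is released.
  // @code
  //   std::shared_ptr<int> __global;
  //
  //   // Writer thread:
  //   std::atomic_store(&__global, std::make_shared<int>(1));
  // @endcode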

  /**
   * @brief Atomic exchange for shared_ptr objects.
   * @param __p A non-null pointer to a shared_ptr object.
   * @param __r New value to store in @c *__p.
   * @return The original value of @c *__p.
   * @{
   */
  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_exchange_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
                             memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_exchange(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
                                           memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange_explicit(__shared_ptr<_Tp, _Lp>* __p,
                             __shared_ptr<_Tp, _Lp> __r,
                             memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
                                           memory_order_seq_cst);
    }
  /// @}
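
  // A usage sketch (names are illustrative): atomic_exchange installs a
  // new value and hands back ownership of the old one in a single
  // locked step.
  // @code
  //   std::shared_ptr<int> __slot = std::make_shared<int>(1);
  //   std::shared_ptr<int> __old
  //     = std::atomic_exchange(&__slot, std::make_shared<int>(2));
  //   // __old now owns 1, __slot owns 2.
  // @endcode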

  /**
   * @brief Atomic compare-and-swap for shared_ptr objects.
   * @param __p A non-null pointer to a shared_ptr object.
   * @param __v A non-null pointer to a shared_ptr object.
   * @param __w The value to store in @c *__p on success.
   * @return True if @c *__p was equivalent to @c *__v, false otherwise.
   *
   * The memory order for failure shall not be @c memory_order_release or
   * @c memory_order_acq_rel, or stronger than the memory order for success.
   * @{
   */
  template<typename _Tp>
    bool
    atomic_compare_exchange_strong_explicit(shared_ptr<_Tp>* __p,
                                            shared_ptr<_Tp>* __v,
                                            shared_ptr<_Tp> __w,
                                            memory_order,
                                            memory_order)
    {
      shared_ptr<_Tp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<shared_ptr<_Tp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
        {
          __x = std::move(*__p);
          *__p = std::move(__w);
          return true;
        }
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_strong(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
                                   shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_weak_explicit(shared_ptr<_Tp>* __p,
                                          shared_ptr<_Tp>* __v,
                                          shared_ptr<_Tp> __w,
                                          memory_order __success,
                                          memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), __success, __failure);
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_weak(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
                                 shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    bool
    atomic_compare_exchange_strong_explicit(__shared_ptr<_Tp, _Lp>* __p,
                                            __shared_ptr<_Tp, _Lp>* __v,
                                            __shared_ptr<_Tp, _Lp> __w,
                                            memory_order,
                                            memory_order)
    {
      __shared_ptr<_Tp, _Lp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<__shared_ptr<_Tp, _Lp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
        {
          __x = std::move(*__p);
          *__p = std::move(__w);
          return true;
        }
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_strong(__shared_ptr<_Tp, _Lp>* __p,
                                   __shared_ptr<_Tp, _Lp>* __v,
                                   __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_weak_explicit(__shared_ptr<_Tp, _Lp>* __p,
                                          __shared_ptr<_Tp, _Lp>* __v,
                                          __shared_ptr<_Tp, _Lp> __w,
                                          memory_order __success,
                                          memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), __success, __failure);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_weak(__shared_ptr<_Tp, _Lp>* __p,
                                 __shared_ptr<_Tp, _Lp>* __v,
                                 __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }
  /// @}
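
  // A usage sketch (names are illustrative): the equivalence test above
  // requires both the stored pointer and the ownership to match, and on
  // failure *__v is updated to the observed value, so the canonical
  // retry loop needs no explicit reload.
  // @code
  //   std::shared_ptr<int> __head = std::make_shared<int>(0);
  //
  //   std::shared_ptr<int> __expected = std::atomic_load(&__head);
  //   std::shared_ptr<int> __desired;
  //   do
  //     __desired = std::make_shared<int>(*__expected + 1);
  //   while (!std::atomic_compare_exchange_weak(&__head, &__expected,
  //                                             __desired));
  // @endcode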

#if __cplusplus >= 202002L
# define __cpp_lib_atomic_shared_ptr 201711L
  template<typename _Tp>
    class atomic;

  template<typename _Up>
    static constexpr bool __is_shared_ptr = false;
  template<typename _Up>
    static constexpr bool __is_shared_ptr<shared_ptr<_Up>> = true;

  template<typename _Tp>
    class _Sp_atomic
    {
      using value_type = _Tp;

      friend class atomic<_Tp>;

      // An atomic version of __shared_count<> and __weak_count<>.
      // Stores a _Sp_counted_base<>* but uses the LSB as a lock.
      struct _Atomic_count
      {
        // Either __shared_count<> or __weak_count<>
        using __count_type = decltype(_Tp::_M_refcount);

        // _Sp_counted_base<>*
        using pointer = decltype(__count_type::_M_pi);

        // Ensure we can use the LSB as the lock bit.
        static_assert(alignof(remove_pointer_t<pointer>) > 1);

        _Atomic_count() : _M_val(0) { }

        explicit
        _Atomic_count(__count_type&& __c) noexcept
        : _M_val(reinterpret_cast<uintptr_t>(__c._M_pi))
        {
          __c._M_pi = nullptr;
        }

        ~_Atomic_count()
        {
          auto __val = _M_val.load(memory_order_relaxed);
          __glibcxx_assert(!(__val & _S_lock_bit));
          if (auto __pi = reinterpret_cast<pointer>(__val))
            {
              if constexpr (__is_shared_ptr<_Tp>)
                __pi->_M_release();
              else
                __pi->_M_weak_release();
            }
        }

        _Atomic_count(const _Atomic_count&) = delete;
        _Atomic_count& operator=(const _Atomic_count&) = delete;

        // Precondition: caller does not hold the lock!
        // Returns the raw pointer value without the lock bit set.
        pointer
        lock(memory_order __o) const noexcept
        {
          // To acquire the lock we flip the LSB from 0 to 1.

          auto __current = _M_val.load(memory_order_relaxed);
          while (__current & _S_lock_bit)
            {
              __detail::__thread_relax();
              __current = _M_val.load(memory_order_relaxed);
            }

          while (!_M_val.compare_exchange_strong(__current,
                                                 __current | _S_lock_bit,
                                                 __o,
                                                 memory_order_relaxed))
            {
              __detail::__thread_relax();
              __current = __current & ~_S_lock_bit;
            }
          return reinterpret_cast<pointer>(__current);
        }

        // Precondition: caller holds the lock!
        void
        unlock(memory_order __o) const noexcept
        {
          _M_val.fetch_sub(1, __o);
        }

        // Swaps the values of *this and __c, and unlocks *this.
        // Precondition: caller holds the lock!
        void
        _M_swap_unlock(__count_type& __c, memory_order __o) noexcept
        {
          if (__o != memory_order_seq_cst)
            __o = memory_order_release;
          auto __x = reinterpret_cast<uintptr_t>(__c._M_pi);
          __x = _M_val.exchange(__x, __o);
          __c._M_pi = reinterpret_cast<pointer>(__x & ~_S_lock_bit);
        }

#if __cpp_lib_atomic_wait
        // Precondition: caller holds the lock!
        void
        _M_wait_unlock(memory_order __o) const noexcept
        {
          auto __v = _M_val.fetch_sub(1, memory_order_relaxed);
          _M_val.wait(__v & ~_S_lock_bit, __o);
        }

        void
        notify_one() noexcept
        {
          _M_val.notify_one();
        }

        void
        notify_all() noexcept
        {
          _M_val.notify_all();
        }
#endif

      private:
        mutable __atomic_base<uintptr_t> _M_val{0};
        static constexpr uintptr_t _S_lock_bit{1};
      };
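
      // A standalone sketch of the pointer-packing trick _Atomic_count
      // uses (the type and names here are invented for illustration):
      // because _Sp_counted_base is aligned to more than one byte, bit 0
      // of its address is always zero and can serve as a spin-lock flag,
      // so one atomic word holds both the pointer and the lock.
      // @code
      //   struct __lsb_lock
      //   {
      //     std::atomic<std::uintptr_t> _M_val{0};
      //     static constexpr std::uintptr_t _S_lock_bit = 1;
      //
      //     void*
      //     lock() noexcept
      //     {
      //       auto __cur = _M_val.load(std::memory_order_relaxed);
      //       for (;;)
      //         {
      //           __cur &= ~_S_lock_bit;   // only try from the unlocked state
      //           if (_M_val.compare_exchange_weak(__cur,
      //                                            __cur | _S_lock_bit,
      //                                            std::memory_order_acquire,
      //                                            std::memory_order_relaxed))
      //             return reinterpret_cast<void*>(__cur);
      //         }
      //     }
      //
      //     void
      //     unlock() noexcept
      //     { _M_val.fetch_sub(1, std::memory_order_release); }
      //   };
      // @endcode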

      typename _Tp::element_type* _M_ptr = nullptr;
      _Atomic_count _M_refcount;

      static typename _Atomic_count::pointer
      _S_add_ref(typename _Atomic_count::pointer __p)
      {
        if (__p)
          {
            if constexpr (__is_shared_ptr<_Tp>)
              __p->_M_add_ref_copy();
            else
              __p->_M_weak_add_ref();
          }
        return __p;
      }

      constexpr _Sp_atomic() noexcept = default;

      explicit
      _Sp_atomic(value_type __r) noexcept
      : _M_ptr(__r._M_ptr), _M_refcount(std::move(__r._M_refcount))
      { }

      ~_Sp_atomic() = default;

      _Sp_atomic(const _Sp_atomic&) = delete;
      void operator=(const _Sp_atomic&) = delete;

      value_type
      load(memory_order __o) const noexcept
      {
        __glibcxx_assert(__o != memory_order_release
                         && __o != memory_order_acq_rel);
        // Ensure that the correct value of _M_ptr is visible after locking,
        // by upgrading relaxed or consume to acquire.
        if (__o != memory_order_seq_cst)
          __o = memory_order_acquire;

        value_type __ret;
        auto __pi = _M_refcount.lock(__o);
        __ret._M_ptr = _M_ptr;
        __ret._M_refcount._M_pi = _S_add_ref(__pi);
        _M_refcount.unlock(memory_order_relaxed);
        return __ret;
      }

      void
      swap(value_type& __r, memory_order __o) noexcept
      {
        _M_refcount.lock(memory_order_acquire);
        std::swap(_M_ptr, __r._M_ptr);
        _M_refcount._M_swap_unlock(__r._M_refcount, __o);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        bool __result = true;
        auto __pi = _M_refcount.lock(memory_order_acquire);
        if (_M_ptr == __expected._M_ptr
              && __pi == __expected._M_refcount._M_pi)
          {
            _M_ptr = __desired._M_ptr;
            _M_refcount._M_swap_unlock(__desired._M_refcount, __o);
          }
        else
          {
            _Tp __sink = std::move(__expected);
            __expected._M_ptr = _M_ptr;
            __expected._M_refcount._M_pi = _S_add_ref(__pi);
            _M_refcount.unlock(__o2);
            __result = false;
          }
        return __result;
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old, memory_order __o) const noexcept
      {
        auto __pi = _M_refcount.lock(memory_order_acquire);
        if (_M_ptr == __old._M_ptr && __pi == __old._M_refcount._M_pi)
          _M_refcount._M_wait_unlock(__o);
        else
          _M_refcount.unlock(memory_order_relaxed);
      }

      void
      notify_one() noexcept
      {
        _M_refcount.notify_one();
      }

      void
      notify_all() noexcept
      {
        _M_refcount.notify_all();
      }
#endif
    };

  template<typename _Tp>
    class atomic<shared_ptr<_Tp>>
    {
    public:
      using value_type = shared_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      atomic(shared_ptr<_Tp> __r) noexcept
      : _M_impl(std::move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      shared_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator shared_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(shared_ptr<_Tp> __desired,
            memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(shared_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      shared_ptr<_Tp>
      exchange(shared_ptr<_Tp> __desired,
               memory_order __o = memory_order_seq_cst) noexcept
      {
        _M_impl.swap(__desired, __o);
        return __desired;
      }

      bool
      compare_exchange_strong(shared_ptr<_Tp>& __expected,
                              shared_ptr<_Tp> __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        return _M_impl.compare_exchange_strong(__expected, __desired,
                                               __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o = memory_order_seq_cst) noexcept
      {
        memory_order __o2;
        switch (__o)
        {
        case memory_order_acq_rel:
          __o2 = memory_order_acquire;
          break;
        case memory_order_release:
          __o2 = memory_order_relaxed;
          break;
        default:
          __o2 = __o;
        }
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o, memory_order __o2) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old,
           memory_order __o = memory_order_seq_cst) const noexcept
      {
        _M_impl.wait(std::move(__old), __o);
      }

      void
      notify_one() noexcept
      {
        _M_impl.notify_one();
      }

      void
      notify_all() noexcept
      {
        _M_impl.notify_all();
      }
#endif

    private:
      _Sp_atomic<shared_ptr<_Tp>> _M_impl;
    };
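
  // A usage sketch (names are illustrative): unlike the free functions
  // above, the C++20 specialization makes the atomicity part of the
  // object's type, so plain loads and stores cannot race.
  // @code
  //   std::atomic<std::shared_ptr<int>> __config{std::make_shared<int>(0)};
  //
  //   // Writer thread:
  //   __config.store(std::make_shared<int>(1));
  //
  //   // Reader thread: the returned shared_ptr keeps the object alive
  //   // even if the writer replaces it immediately afterwards.
  //   std::shared_ptr<int> __current = __config.load();
  // @endcode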

  template<typename _Tp>
    class atomic<weak_ptr<_Tp>>
    {
    public:
      using value_type = weak_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      atomic(weak_ptr<_Tp> __r) noexcept
      : _M_impl(std::move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      weak_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator weak_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(weak_ptr<_Tp> __desired,
            memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(weak_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      weak_ptr<_Tp>
      exchange(weak_ptr<_Tp> __desired,
               memory_order __o = memory_order_seq_cst) noexcept
      {
        _M_impl.swap(__desired, __o);
        return __desired;
      }

      bool
      compare_exchange_strong(weak_ptr<_Tp>& __expected,
                              weak_ptr<_Tp> __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        return _M_impl.compare_exchange_strong(__expected, __desired,
                                               __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o = memory_order_seq_cst) noexcept
      {
        memory_order __o2;
        switch (__o)
        {
        case memory_order_acq_rel:
          __o2 = memory_order_acquire;
          break;
        case memory_order_release:
          __o2 = memory_order_relaxed;
          break;
        default:
          __o2 = __o;
        }
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o, memory_order __o2) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old,
           memory_order __o = memory_order_seq_cst) const noexcept
      {
        _M_impl.wait(std::move(__old), __o);
      }

      void
      notify_one() noexcept
      {
        _M_impl.notify_one();
      }

      void
      notify_all() noexcept
      {
        _M_impl.notify_all();
      }
#endif

    private:
      _Sp_atomic<weak_ptr<_Tp>> _M_impl;
    };
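
  // A usage sketch (names are illustrative): the weak_ptr specialization
  // suits a shared cache entry that must not extend the object's lifetime.
  // @code
  //   std::shared_ptr<int> __owner = std::make_shared<int>(5);
  //   std::atomic<std::weak_ptr<int>> __cache{__owner};
  //
  //   // Reader thread: promote the stored weak_ptr; empty if expired.
  //   if (std::shared_ptr<int> __sp = __cache.load().lock())
  //     {
  //       int __value = *__sp;
  //     }
  // @endcode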
#endif // C++20

  /// @} relates shared_ptr
  /// @} group pointer_abstractions

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // _SHARED_PTR_ATOMIC_H