// shared_ptr atomic access -*- C++ -*-

// Copyright (C) 2014-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/shared_ptr_atomic.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{memory}
 */

#ifndef _SHARED_PTR_ATOMIC_H
#define _SHARED_PTR_ATOMIC_H 1

#include <bits/atomic_base.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup pointer_abstractions
   * @{
   */
  /// @relates shared_ptr @{

  /// @cond undocumented

  struct _Sp_locker
  {
    _Sp_locker(const _Sp_locker&) = delete;
    _Sp_locker& operator=(const _Sp_locker&) = delete;

#ifdef __GTHREADS
    explicit
    _Sp_locker(const void*) noexcept;
    _Sp_locker(const void*, const void*) noexcept;
    ~_Sp_locker();

  private:
    unsigned char _M_key1;
    unsigned char _M_key2;
#else
    explicit _Sp_locker(const void*, const void* = nullptr) { }
#endif
  };

  /// @endcond

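  // [Editorial sketch, not part of this header] _Sp_locker serializes access
  // to a shared_ptr by locking one or two mutexes chosen from a fixed pool,
  // hashing the object's address to pick a slot (the small _M_key members
  // record which slots were taken). A minimal sketch of that technique,
  // with hypothetical names:
  //
  //   #include <functional>
  //   #include <mutex>
  //
  //   std::mutex pool[16];                        // fixed pool of locks
  //
  //   unsigned key(const void* p)                 // hash address to a slot
  //   { return std::hash<const void*>{}(p) % 16; }
  //
  //   void lock_one(const void* p)
  //   { pool[key(p)].lock(); }
  //
  // Locking two addresses must take the slots in a fixed order (and skip
  // the second lock when both hash to the same slot) to avoid deadlock.
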
  /**
   * @brief Report whether shared_ptr atomic operations are lock-free.
   * @param __p A non-null pointer to a shared_ptr object.
   * @return True if atomic access to @c *__p is lock-free, false otherwise.
   * @{
   */
  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_is_lock_free(const __shared_ptr<_Tp, _Lp>* __p)
    {
#ifdef __GTHREADS
      return __gthread_active_p() == 0;
#else
      return true;
#endif
    }

  template<typename _Tp>
    inline bool
    atomic_is_lock_free(const shared_ptr<_Tp>* __p)
    { return std::atomic_is_lock_free<_Tp, __default_lock_policy>(__p); }

  /// @}

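  // [Editorial note] These overloads report lock-free only while the process
  // is effectively single-threaded (__gthread_active_p() returning 0); once
  // real threads exist they return false and the mutex pool is used:
  //
  //   std::shared_ptr<int> sp = std::make_shared<int>(1);
  //   bool lf = std::atomic_is_lock_free(&sp);    // typically false
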
  /**
   * @brief Atomic load for shared_ptr objects.
   * @param __p A non-null pointer to a shared_ptr object.
   * @return @c *__p
   *
   * The memory order shall not be @c memory_order_release or
   * @c memory_order_acq_rel.
   * @{
   */
  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_load_explicit(const shared_ptr<_Tp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_load(const shared_ptr<_Tp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_load_explicit(const __shared_ptr<_Tp, _Lp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_load(const __shared_ptr<_Tp, _Lp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }
  /// @}

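  // [Editorial example, hypothetical names] The pre-C++20 way to share one
  // shared_ptr object between threads is through these free functions:
  //
  //   std::shared_ptr<int> global;   // touched only via atomic_* functions
  //
  //   int reader()
  //   {
  //     std::shared_ptr<int> local = std::atomic_load(&global);
  //     return local ? *local : -1;  // safe: local owns the object
  //   }
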
  /**
   * @brief Atomic store for shared_ptr objects.
   * @param __p A non-null pointer to a shared_ptr object.
   * @param __r The value to store.
   *
   * The memory order shall not be @c memory_order_acquire or
   * @c memory_order_acq_rel.
   * @{
   */
  template<typename _Tp>
    inline void
    atomic_store_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
                          memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // use swap so that **__p not destroyed while lock held
    }

  template<typename _Tp>
    inline void
    atomic_store(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    inline void
    atomic_store_explicit(__shared_ptr<_Tp, _Lp>* __p,
                          __shared_ptr<_Tp, _Lp> __r,
                          memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // use swap so that **__p not destroyed while lock held
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline void
    atomic_store(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }
  /// @}

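  // [Editorial example] The matching writer for the reader sketch above:
  //
  //   void writer()
  //   { std::atomic_store(&global, std::make_shared<int>(42)); }
  //
  // The implementation stores via swap() so that the displaced object is
  // destroyed after the pool mutex is released, keeping arbitrary user
  // destructors out of the locked region.
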
  /**
   * @brief Atomic exchange for shared_ptr objects.
   * @param __p A non-null pointer to a shared_ptr object.
   * @param __r New value to store in @c *__p.
   * @return The original value of @c *__p
   * @{
   */
  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_exchange_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
                             memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_exchange(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
                                           memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange_explicit(__shared_ptr<_Tp, _Lp>* __p,
                             __shared_ptr<_Tp, _Lp> __r,
                             memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
                                           memory_order_seq_cst);
    }
  /// @}

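  // [Editorial example] atomic_exchange installs a new value and returns the
  // previous one in a single atomic step, e.g. to drain a published slot:
  //
  //   std::shared_ptr<int> old
  //     = std::atomic_exchange(&global, std::shared_ptr<int>{});
  //   // old now owns whatever was published; global is empty.
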
  /**
   * @brief Atomic compare-and-swap for shared_ptr objects.
   * @param __p A non-null pointer to a shared_ptr object.
   * @param __v A non-null pointer to a shared_ptr object.
   * @param __w The shared_ptr value to store if @c *__p is equivalent
   *            to @c *__v.
   * @return True if @c *__p was equivalent to @c *__v, false otherwise.
   *
   * The memory order for failure shall not be @c memory_order_release or
   * @c memory_order_acq_rel, or stronger than the memory order for success.
   * @{
   */
  template<typename _Tp>
    bool
    atomic_compare_exchange_strong_explicit(shared_ptr<_Tp>* __p,
                                            shared_ptr<_Tp>* __v,
                                            shared_ptr<_Tp> __w,
                                            memory_order,
                                            memory_order)
    {
      shared_ptr<_Tp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<shared_ptr<_Tp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
        {
          __x = std::move(*__p);
          *__p = std::move(__w);
          return true;
        }
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_strong(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
                                   shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_weak_explicit(shared_ptr<_Tp>* __p,
                                          shared_ptr<_Tp>* __v,
                                          shared_ptr<_Tp> __w,
                                          memory_order __success,
                                          memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), __success, __failure);
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_weak(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
                                 shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    bool
    atomic_compare_exchange_strong_explicit(__shared_ptr<_Tp, _Lp>* __p,
                                            __shared_ptr<_Tp, _Lp>* __v,
                                            __shared_ptr<_Tp, _Lp> __w,
                                            memory_order,
                                            memory_order)
    {
      __shared_ptr<_Tp, _Lp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<__shared_ptr<_Tp, _Lp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
        {
          __x = std::move(*__p);
          *__p = std::move(__w);
          return true;
        }
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_strong(__shared_ptr<_Tp, _Lp>* __p,
                                   __shared_ptr<_Tp, _Lp>* __v,
                                   __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_weak_explicit(__shared_ptr<_Tp, _Lp>* __p,
                                          __shared_ptr<_Tp, _Lp>* __v,
                                          __shared_ptr<_Tp, _Lp> __w,
                                          memory_order __success,
                                          memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), __success, __failure);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_weak(__shared_ptr<_Tp, _Lp>* __p,
                                 __shared_ptr<_Tp, _Lp>* __v,
                                 __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }
  /// @}

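  // [Editorial example] A retry loop built on the CAS functions; note that
  // "equivalent" here means same stored pointer *and* same owner, which is
  // why the implementation consults owner_less as well as operator==:
  //
  //   std::shared_ptr<int> expected = std::atomic_load(&global);
  //   std::shared_ptr<int> next;
  //   do
  //     next = std::make_shared<int>(expected ? *expected + 1 : 0);
  //   while (!std::atomic_compare_exchange_weak(&global, &expected, next));
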
#if __cplusplus >= 202002L
# define __cpp_lib_atomic_shared_ptr 201711L
  template<typename _Tp>
    class atomic;

  template<typename _Up>
    static constexpr bool __is_shared_ptr = false;
  template<typename _Up>
    static constexpr bool __is_shared_ptr<shared_ptr<_Up>> = true;

  template<typename _Tp>
    class _Sp_atomic
    {
      using value_type = _Tp;

      friend class atomic<_Tp>;

      // An atomic version of __shared_count<> and __weak_count<>.
      // Stores a _Sp_counted_base<>* but uses the LSB as a lock.
      struct _Atomic_count
      {
        // Either __shared_count<> or __weak_count<>
        using __count_type = decltype(_Tp::_M_refcount);

        // _Sp_counted_base<>*
        using pointer = decltype(__count_type::_M_pi);

        // Ensure we can use the LSB as the lock bit.
        static_assert(alignof(remove_pointer_t<pointer>) > 1);

        constexpr _Atomic_count() noexcept = default;

        explicit
        _Atomic_count(__count_type&& __c) noexcept
        : _M_val(reinterpret_cast<uintptr_t>(__c._M_pi))
        {
          __c._M_pi = nullptr;
        }

        ~_Atomic_count()
        {
          auto __val = _M_val.load(memory_order_relaxed);
          __glibcxx_assert(!(__val & _S_lock_bit));
          if (auto __pi = reinterpret_cast<pointer>(__val))
            {
              if constexpr (__is_shared_ptr<_Tp>)
                __pi->_M_release();
              else
                __pi->_M_weak_release();
            }
        }

        _Atomic_count(const _Atomic_count&) = delete;
        _Atomic_count& operator=(const _Atomic_count&) = delete;

        // Precondition: Caller does not hold lock!
        // Returns the raw pointer value without the lock bit set.
        pointer
        lock(memory_order __o) const noexcept
        {
          // To acquire the lock we flip the LSB from 0 to 1.

          auto __current = _M_val.load(memory_order_relaxed);
          while (__current & _S_lock_bit)
            {
#if __cpp_lib_atomic_wait
              __detail::__thread_relax();
#endif
              __current = _M_val.load(memory_order_relaxed);
            }

          while (!_M_val.compare_exchange_strong(__current,
                                                 __current | _S_lock_bit,
                                                 __o,
                                                 memory_order_relaxed))
            {
#if __cpp_lib_atomic_wait
              __detail::__thread_relax();
#endif
              __current = __current & ~_S_lock_bit;
            }
          return reinterpret_cast<pointer>(__current);
        }

        // Precondition: caller holds lock!
        void
        unlock(memory_order __o) const noexcept
        {
          _M_val.fetch_sub(1, __o);
        }

        // Swaps the values of *this and __c, and unlocks *this.
        // Precondition: caller holds lock!
        void
        _M_swap_unlock(__count_type& __c, memory_order __o) noexcept
        {
          if (__o != memory_order_seq_cst)
            __o = memory_order_release;
          auto __x = reinterpret_cast<uintptr_t>(__c._M_pi);
          __x = _M_val.exchange(__x, __o);
          __c._M_pi = reinterpret_cast<pointer>(__x & ~_S_lock_bit);
        }

#if __cpp_lib_atomic_wait
        // Precondition: caller holds lock!
        void
        _M_wait_unlock(memory_order __o) const noexcept
        {
          auto __v = _M_val.fetch_sub(1, memory_order_relaxed);
          _M_val.wait(__v & ~_S_lock_bit, __o);
        }

        void
        notify_one() noexcept
        {
          _M_val.notify_one();
        }

        void
        notify_all() noexcept
        {
          _M_val.notify_all();
        }
#endif

      private:
        mutable __atomic_base<uintptr_t> _M_val{0};
        static constexpr uintptr_t _S_lock_bit{1};
      };

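      // [Editorial sketch, not part of this header] The LSB-as-lock trick
      // works because _Sp_counted_base has alignment greater than 1 (enforced
      // by the static_assert above), so the low bit of a valid pointer is
      // always zero and can carry the lock state. A standalone sketch of the
      // technique, with hypothetical names and simplified memory orders:
      //
      //   #include <atomic>
      //   #include <cstdint>
      //
      //   struct alignas(2) node { int value; };
      //
      //   std::atomic<std::uintptr_t> tagged{0};  // node* with lock in LSB
      //
      //   node* lock_ptr()                        // spin until LSB: 0 -> 1
      //   {
      //     auto cur = tagged.load(std::memory_order_relaxed);
      //     for (;;)
      //       {
      //         cur &= ~std::uintptr_t(1);        // expect the unlocked value
      //         if (tagged.compare_exchange_weak(cur, cur | 1,
      //                                          std::memory_order_acquire))
      //           return reinterpret_cast<node*>(cur);
      //       }
      //   }
      //
      //   void unlock_ptr()                       // clear the lock bit
      //   { tagged.fetch_sub(1, std::memory_order_release); }
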
      typename _Tp::element_type* _M_ptr = nullptr;
      _Atomic_count _M_refcount;

      static typename _Atomic_count::pointer
      _S_add_ref(typename _Atomic_count::pointer __p)
      {
        if (__p)
          {
            if constexpr (__is_shared_ptr<_Tp>)
              __p->_M_add_ref_copy();
            else
              __p->_M_weak_add_ref();
          }
        return __p;
      }

      constexpr _Sp_atomic() noexcept = default;

      explicit
      _Sp_atomic(value_type __r) noexcept
      : _M_ptr(__r._M_ptr), _M_refcount(std::move(__r._M_refcount))
      { }

      ~_Sp_atomic() = default;

      _Sp_atomic(const _Sp_atomic&) = delete;
      void operator=(const _Sp_atomic&) = delete;

      value_type
      load(memory_order __o) const noexcept
      {
        __glibcxx_assert(__o != memory_order_release
                           && __o != memory_order_acq_rel);
        // Ensure that the correct value of _M_ptr is visible after locking,
        // by upgrading relaxed or consume to acquire.
        if (__o != memory_order_seq_cst)
          __o = memory_order_acquire;

        value_type __ret;
        auto __pi = _M_refcount.lock(__o);
        __ret._M_ptr = _M_ptr;
        __ret._M_refcount._M_pi = _S_add_ref(__pi);
        _M_refcount.unlock(memory_order_relaxed);
        return __ret;
      }

      void
      swap(value_type& __r, memory_order __o) noexcept
      {
        _M_refcount.lock(memory_order_acquire);
        std::swap(_M_ptr, __r._M_ptr);
        _M_refcount._M_swap_unlock(__r._M_refcount, __o);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        bool __result = true;
        auto __pi = _M_refcount.lock(memory_order_acquire);
        if (_M_ptr == __expected._M_ptr
              && __pi == __expected._M_refcount._M_pi)
          {
            _M_ptr = __desired._M_ptr;
            _M_refcount._M_swap_unlock(__desired._M_refcount, __o);
          }
        else
          {
            _Tp __sink = std::move(__expected);
            __expected._M_ptr = _M_ptr;
            __expected._M_refcount._M_pi = _S_add_ref(__pi);
            _M_refcount.unlock(__o2);
            __result = false;
          }
        return __result;
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old, memory_order __o) const noexcept
      {
        auto __pi = _M_refcount.lock(memory_order_acquire);
        if (_M_ptr == __old._M_ptr && __pi == __old._M_refcount._M_pi)
          _M_refcount._M_wait_unlock(__o);
        else
          _M_refcount.unlock(memory_order_relaxed);
      }

      void
      notify_one() noexcept
      {
        _M_refcount.notify_one();
      }

      void
      notify_all() noexcept
      {
        _M_refcount.notify_all();
      }
#endif
    };

  template<typename _Tp>
    class atomic<shared_ptr<_Tp>>
    {
    public:
      using value_type = shared_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 3661. constinit atomic<shared_ptr<T>> a(nullptr); should work
      constexpr atomic(nullptr_t) noexcept : atomic() { }

      atomic(shared_ptr<_Tp> __r) noexcept
      : _M_impl(std::move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      shared_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator shared_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(shared_ptr<_Tp> __desired,
            memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(shared_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      shared_ptr<_Tp>
      exchange(shared_ptr<_Tp> __desired,
               memory_order __o = memory_order_seq_cst) noexcept
      {
        _M_impl.swap(__desired, __o);
        return __desired;
      }

      bool
      compare_exchange_strong(shared_ptr<_Tp>& __expected,
                              shared_ptr<_Tp> __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        return _M_impl.compare_exchange_strong(__expected, __desired,
                                               __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o = memory_order_seq_cst) noexcept
      {
        memory_order __o2;
        switch (__o)
          {
          case memory_order_acq_rel:
            __o2 = memory_order_acquire;
            break;
          case memory_order_release:
            __o2 = memory_order_relaxed;
            break;
          default:
            __o2 = __o;
          }
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o, memory_order __o2) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old,
           memory_order __o = memory_order_seq_cst) const noexcept
      {
        _M_impl.wait(std::move(__old), __o);
      }

      void
      notify_one() noexcept
      {
        _M_impl.notify_one();
      }

      void
      notify_all() noexcept
      {
        _M_impl.notify_all();
      }
#endif

    private:
      _Sp_atomic<shared_ptr<_Tp>> _M_impl;
    };

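  // [Editorial example, hypothetical names] Since C++20 this specialization
  // is the preferred interface, replacing the free functions above:
  //
  //   std::atomic<std::shared_ptr<int>> slot;     // empty to start
  //
  //   void publish()
  //   { slot.store(std::make_shared<int>(7)); }
  //
  //   int consume()
  //   {
  //     auto p = slot.load();                     // owning snapshot
  //     return p ? *p : -1;
  //   }
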
  template<typename _Tp>
    class atomic<weak_ptr<_Tp>>
    {
    public:
      using value_type = weak_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      atomic(weak_ptr<_Tp> __r) noexcept
      : _M_impl(std::move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      weak_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator weak_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(weak_ptr<_Tp> __desired,
            memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(weak_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      weak_ptr<_Tp>
      exchange(weak_ptr<_Tp> __desired,
               memory_order __o = memory_order_seq_cst) noexcept
      {
        _M_impl.swap(__desired, __o);
        return __desired;
      }

      bool
      compare_exchange_strong(weak_ptr<_Tp>& __expected,
                              weak_ptr<_Tp> __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        return _M_impl.compare_exchange_strong(__expected, __desired,
                                               __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o = memory_order_seq_cst) noexcept
      {
        memory_order __o2;
        switch (__o)
          {
          case memory_order_acq_rel:
            __o2 = memory_order_acquire;
            break;
          case memory_order_release:
            __o2 = memory_order_relaxed;
            break;
          default:
            __o2 = __o;
          }
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o, memory_order __o2) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old,
           memory_order __o = memory_order_seq_cst) const noexcept
      {
        _M_impl.wait(std::move(__old), __o);
      }

      void
      notify_one() noexcept
      {
        _M_impl.notify_one();
      }

      void
      notify_all() noexcept
      {
        _M_impl.notify_all();
      }
#endif

    private:
      _Sp_atomic<weak_ptr<_Tp>> _M_impl;
    };
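
  // [Editorial example, hypothetical names] atomic<weak_ptr<T>> suits a
  // cache that must not keep its entry alive on its own:
  //
  //   std::atomic<std::weak_ptr<int>> cache;
  //
  //   std::shared_ptr<int> get()
  //   {
  //     if (auto sp = cache.load().lock())        // entry still alive?
  //       return sp;
  //     auto sp = std::make_shared<int>(0);
  //     cache.store(sp);                          // racy refresh is benign
  //     return sp;
  //   }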
#endif // C++20

  /// @} relates shared_ptr
  /// @} group pointer_abstractions

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // _SHARED_PTR_ATOMIC_H