// <shared_mutex> -*- C++ -*-

// Copyright (C) 2013-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_SHARED_MUTEX
#define _GLIBCXX_SHARED_MUTEX 1

#pragma GCC system_header

#if __cplusplus >= 201402L

#include <bits/c++config.h>
#include <condition_variable>
#include <bits/functexcept.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup mutexes
   * @{
   */

#ifdef _GLIBCXX_HAS_GTHREADS

#if __cplusplus >= 201703L
#define __cpp_lib_shared_mutex 201505
  class shared_mutex;
#endif

#define __cpp_lib_shared_timed_mutex 201402
  class shared_timed_mutex;

  /// @cond undocumented

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
#ifdef __gthrw
#define _GLIBCXX_GTHRW(name) \
  __gthrw(pthread_ ## name); \
  static inline int \
  __glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
  { \
    if (__gthread_active_p ()) \
      return __gthrw_(pthread_ ## name) (__rwlock); \
    else \
      return 0; \
  }
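
  // Note: when the gthreads layer reports that the process is (still)
  // single-threaded (__gthread_active_p() is false), these wrappers skip
  // the real pthread call and simply report success, as the code above shows.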
  _GLIBCXX_GTHRW(rwlock_rdlock)
  _GLIBCXX_GTHRW(rwlock_tryrdlock)
  _GLIBCXX_GTHRW(rwlock_wrlock)
  _GLIBCXX_GTHRW(rwlock_trywrlock)
  _GLIBCXX_GTHRW(rwlock_unlock)
# ifndef PTHREAD_RWLOCK_INITIALIZER
  _GLIBCXX_GTHRW(rwlock_destroy)
  __gthrw(pthread_rwlock_init);
  static inline int
  __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
    else
      return 0;
  }
# endif
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  __gthrw(pthread_rwlock_timedrdlock);
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
    else
      return 0;
  }
  __gthrw(pthread_rwlock_timedwrlock);
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
    else
      return 0;
  }
# endif
#else
  static inline int
  __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_rdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_tryrdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_wrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_trywrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_unlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_destroy(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_destroy (__rwlock); }
  static inline int
  __glibcxx_rwlock_init(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_init (__rwlock, NULL); }
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  { return pthread_rwlock_timedrdlock (__rwlock, __ts); }
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  { return pthread_rwlock_timedwrlock (__rwlock, __ts); }
# endif
#endif

  /// A shared mutex type implemented using pthread_rwlock_t.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    pthread_rwlock_t _M_rwlock;

  public:
    __shared_mutex_pthread()
    {
      int __ret = __glibcxx_rwlock_init(&_M_rwlock);
      if (__ret == ENOMEM)
        __throw_bad_alloc();
      else if (__ret == EAGAIN)
        __throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
        __throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    ~__shared_mutex_pthread()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    void
    lock()
    {
      int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock()
    {
      int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
        __ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock_shared()
    {
      unlock();
    }

    void* native_handle() { return &_M_rwlock; }
  };
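
  // Illustrative sketch (not part of this header): the raw POSIX pattern
  // that __shared_mutex_pthread wraps.  Note that POSIX provides a single
  // pthread_rwlock_unlock for both lock modes, which is why unlock_shared()
  // above simply calls unlock().
  //
  //   pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;
  //   pthread_rwlock_rdlock(&rw);  // shared (reader) lock
  //   pthread_rwlock_unlock(&rw);
  //   pthread_rwlock_wrlock(&rw);  // exclusive (writer) lock
  //   pthread_rwlock_unlock(&rw);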
#endif

#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  /// A shared mutex type implemented using std::condition_variable.
  class __shared_mutex_cv
  {
    friend class shared_timed_mutex;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority. When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex _M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable _M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable _M_gate2;
    // The write-entered flag and reader count.
    unsigned _M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;
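
    // For example, with a 32-bit unsigned, _S_write_entered is 0x80000000
    // and _S_max_readers is 0x7fffffff, so the state word can count up to
    // 2^31 - 1 concurrent reader locks alongside the write-entered flag.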

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    __shared_mutex_cv() : _M_state(0) {}

    ~__shared_mutex_cv()
    {
      __glibcxx_assert( _M_state == 0 );
    }

    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
        {
          _M_state = _S_write_entered;
          return true;
        }
      return false;
    }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
        return false;
      if (_M_state < _S_max_readers)
        {
          ++_M_state;
          return true;
        }
      return false;
    }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
        {
          // Wake the queued writer if there are no more readers.
          if (_M_readers() == 0)
            _M_gate2.notify_one();
          // No need to notify gate1 because we give priority to the queued
          // writer, and that writer will eventually notify gate1 after it
          // clears the write-entered flag.
        }
      else
        {
          // Wake any thread that was blocked on reader overflow.
          if (__prev == _S_max_readers)
            _M_gate1.notify_one();
        }
    }
  };
#endif
  /// @endcond

#if __cplusplus > 201402L
  /// The standard shared mutex type.
  class shared_mutex
  {
  public:
    shared_mutex() = default;
    ~shared_mutex() = default;

    shared_mutex(const shared_mutex&) = delete;
    shared_mutex& operator=(const shared_mutex&) = delete;

    // Exclusive ownership

    void lock() { _M_impl.lock(); }
    bool try_lock() { return _M_impl.try_lock(); }
    void unlock() { _M_impl.unlock(); }

    // Shared ownership

    void lock_shared() { _M_impl.lock_shared(); }
    bool try_lock_shared() { return _M_impl.try_lock_shared(); }
    void unlock_shared() { _M_impl.unlock_shared(); }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
    typedef void* native_handle_type;
    native_handle_type native_handle() { return _M_impl.native_handle(); }

  private:
    __shared_mutex_pthread _M_impl;
#else
  private:
    __shared_mutex_cv _M_impl;
#endif
  };
#endif // C++17
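
  // Example (a minimal usage sketch, not part of this header): protecting a
  // value with std::shared_mutex so that many readers may proceed
  // concurrently while writers get exclusive access.  The names `value',
  // `read' and `write' are illustrative only.
  //
  //   #include <mutex>
  //   #include <shared_mutex>
  //
  //   std::shared_mutex m;
  //   int value = 0;
  //
  //   int read()
  //   {
  //     std::shared_lock<std::shared_mutex> lk(m); // lock_shared() in ctor
  //     return value;
  //   } // unlock_shared() in ~shared_lock
  //
  //   void write(int v)
  //   {
  //     std::lock_guard<std::shared_mutex> lk(m);  // lock() in ctor
  //     value = v;
  //   } // unlock() in ~lock_guard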

  /// @cond undocumented
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif
  /// @endcond

  /// The standard shared timed mutex type.
  class shared_timed_mutex
    : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    using __clock_t = chrono::steady_clock;
#else
    using __clock_t = chrono::system_clock;
#endif

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
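        // duration_cast truncates, so when the clock's tick period is
        // coarser than _Period the result may round down; add one tick so
        // we never return before the full relative time has elapsed.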
        if (ratio_greater<__clock_t::period, _Period>())
          ++__rt;
        return try_lock_until(__clock_t::now() + __rt);
      }

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
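        // As in try_lock_for, round up so truncation cannot cause an
        // early return.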
        if (ratio_greater<__clock_t::period, _Period>())
          ++__rt;
        return try_lock_shared_until(__clock_t::now() + __rt);
      }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK

    // Exclusive ownership

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<chrono::system_clock,
                     _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

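        // POSIX pthread_rwlock_timedrdlock/timedwrlock wait against
        // CLOCK_REALTIME, which corresponds to chrono::system_clock; split
        // the absolute time into the whole-seconds and nanoseconds fields
        // of a POSIX timespec.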
        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
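    // pthread_rwlock_clockwrlock/clockrdlock (glibc extensions, guarded
    // here by _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK) take a caller-chosen
    // clock; waiting against CLOCK_MONOTONIC matches chrono::steady_clock.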
    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<chrono::steady_clock,
                     _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = pthread_rwlock_clockwrlock(&_M_rwlock, CLOCK_MONOTONIC,
                                               &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }
#endif

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        // The user-supplied clock may not tick at the same rate as
        // steady_clock, so we must loop in order to guarantee that
        // the timeout has expired before returning false.
        typename _Clock::time_point __now = _Clock::now();
        do {
          auto __rtime = __atime - __now;
          if (try_lock_for(__rtime))
            return true;
          __now = _Clock::now();
        } while (__atime > __now);
        return false;
      }

    // Shared ownership

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<chrono::system_clock,
                            _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret;
        // Unlike for lock(), we are not allowed to throw an exception so if
        // the maximum number of read locks has been exceeded, or we would
        // deadlock, we just try to acquire the lock again (and will time out
        // eventually).
        // In cases where we would exceed the maximum number of read locks
        // throughout the whole time until the timeout, we will fail to
        // acquire the lock even if it would be logically free; however, this
        // is allowed by the standard, and we made a "strong effort"
        // (see C++14 30.4.1.4p26).
        // For cases where the implementation detects a deadlock we
        // intentionally block and timeout so that an early return isn't
        // mistaken for a spurious failure, which might help users realise
        // there is a deadlock.
        do
          __ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
        while (__ret == EAGAIN || __ret == EDEADLK);
        if (__ret == ETIMEDOUT)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<chrono::steady_clock,
                            _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = pthread_rwlock_clockrdlock(&_M_rwlock, CLOCK_MONOTONIC,
                                               &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }
#endif

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                            _Duration>& __atime)
      {
        // The user-supplied clock may not tick at the same rate as
        // steady_clock, so we must loop in order to guarantee that
        // the timeout has expired before returning false.
        typename _Clock::time_point __now = _Clock::now();
        do {
          auto __rtime = __atime - __now;
          if (try_lock_shared_for(__rtime))
            return true;
          __now = _Clock::now();
        } while (__atime > __now);
        return false;
      }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Exclusive ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return !_M_write_entered(); }))
          {
            return false;
          }
        _M_state |= _S_write_entered;
        if (!_M_gate2.wait_until(__lk, __abs_time,
                                 [=]{ return _M_readers() == 0; }))
          {
            _M_state ^= _S_write_entered;
            // Wake all threads blocked while the write-entered flag was set.
            _M_gate1.notify_all();
            return false;
          }
        return true;
      }

    // Shared ownership

    template <typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                            _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return _M_state < _S_max_readers; }))
          {
            return false;
          }
        ++_M_state;
        return true;
      }

#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
#endif // _GLIBCXX_HAS_GTHREADS
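
  // Example (a minimal usage sketch, not part of this header): timed
  // acquisition with shared_timed_mutex.  The function name and timeout
  // are illustrative only.
  //
  //   #include <chrono>
  //   #include <shared_mutex>
  //
  //   std::shared_timed_mutex m;
  //
  //   bool read_with_timeout()
  //   {
  //     std::shared_lock<std::shared_timed_mutex> lk(m, std::defer_lock);
  //     if (!lk.try_lock_for(std::chrono::milliseconds(10)))
  //       return false;  // could not get shared ownership within 10ms
  //     // ... read the shared data under lk ...
  //     return true;
  //   }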

  /// shared_lock
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m)
      : _M_pm(std::__addressof(__m)), _M_owns(true)
      { __m.lock_shared(); }

      shared_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_pm(std::__addressof(__m)), _M_owns(false) { }

      shared_lock(mutex_type& __m, try_to_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }

      shared_lock(mutex_type& __m, adopt_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
        shared_lock(mutex_type& __m,
                    const chrono::time_point<_Clock, _Duration>& __abs_time)
        : _M_pm(std::__addressof(__m)),
          _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
        shared_lock(mutex_type& __m,
                    const chrono::duration<_Rep, _Period>& __rel_time)
        : _M_pm(std::__addressof(__m)),
          _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
        if (_M_owns)
          _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
        shared_lock(std::move(__sl)).swap(*this);
        return *this;
      }

      void
      lock()
      {
        _M_lockable();
        _M_pm->lock_shared();
        _M_owns = true;
      }

      bool
      try_lock()
      {
        _M_lockable();
        return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
        bool
        try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
        }

      template<typename _Clock, typename _Duration>
        bool
        try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
        }

      void
      unlock()
      {
        if (!_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        _M_pm->unlock_shared();
        _M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
        std::swap(_M_pm, __u._M_pm);
        std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
        _M_owns = false;
        return std::exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      void
      _M_lockable() const
      {
        if (_M_pm == nullptr)
          __throw_system_error(int(errc::operation_not_permitted));
        if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
      }

      mutex_type* _M_pm;
      bool        _M_owns;
    };

  /// Swap specialization for shared_lock
  /// @relates shared_mutex
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
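
  // Example (a minimal usage sketch, not part of this header): shared_lock
  // is movable, so shared ownership can be handed from one scope to another.
  // std::shared_mutex requires C++17; in C++14 use shared_timed_mutex.
  //
  //   std::shared_mutex m;
  //
  //   std::shared_lock<std::shared_mutex> take_shared()
  //   {
  //     std::shared_lock<std::shared_mutex> lk(m);  // locks shared
  //     return lk;                                  // ownership moves out
  //   }
  //
  //   void reader()
  //   {
  //     auto lk = take_shared();
  //     // ... read under shared ownership ...
  //   } // unlock_shared() in ~shared_lock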

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++14

#endif // _GLIBCXX_SHARED_MUTEX