// <shared_mutex> -*- C++ -*-

// Copyright (C) 2013-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_SHARED_MUTEX
#define _GLIBCXX_SHARED_MUTEX 1

#pragma GCC system_header

#if __cplusplus >= 201402L

#include <bits/c++config.h>
#include <condition_variable>
#include <bits/functexcept.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup mutexes
   * @{
   */

#ifdef _GLIBCXX_HAS_GTHREADS

#if __cplusplus >= 201703L
#define __cpp_lib_shared_mutex 201505
  class shared_mutex;
#endif

#define __cpp_lib_shared_timed_mutex 201402
  class shared_timed_mutex;

  /// @cond undocumented

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
#ifdef __gthrw
#define _GLIBCXX_GTHRW(name) \
  __gthrw(pthread_ ## name); \
  static inline int \
  __glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
  { \
    if (__gthread_active_p ()) \
      return __gthrw_(pthread_ ## name) (__rwlock); \
    else \
      return 0; \
  }
  _GLIBCXX_GTHRW(rwlock_rdlock)
  _GLIBCXX_GTHRW(rwlock_tryrdlock)
  _GLIBCXX_GTHRW(rwlock_wrlock)
  _GLIBCXX_GTHRW(rwlock_trywrlock)
  _GLIBCXX_GTHRW(rwlock_unlock)
# ifndef PTHREAD_RWLOCK_INITIALIZER
  _GLIBCXX_GTHRW(rwlock_destroy)
  __gthrw(pthread_rwlock_init);
  static inline int
  __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
    else
      return 0;
  }
# endif
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  __gthrw(pthread_rwlock_timedrdlock);
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
    else
      return 0;
  }
  __gthrw(pthread_rwlock_timedwrlock);
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
    else
      return 0;
  }
# endif
#else
  static inline int
  __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_rdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_tryrdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_wrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_trywrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_unlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_destroy(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_destroy (__rwlock); }
  static inline int
  __glibcxx_rwlock_init(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_init (__rwlock, NULL); }
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  { return pthread_rwlock_timedrdlock (__rwlock, __ts); }
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  { return pthread_rwlock_timedwrlock (__rwlock, __ts); }
# endif
#endif

  /// A shared mutex type implemented using pthread_rwlock_t.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    pthread_rwlock_t _M_rwlock;

  public:
    __shared_mutex_pthread()
    {
      int __ret = __glibcxx_rwlock_init(&_M_rwlock);
      if (__ret == ENOMEM)
        __throw_bad_alloc();
      else if (__ret == EAGAIN)
        __throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
        __throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    ~__shared_mutex_pthread()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    void
    lock()
    {
      int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock()
    {
      int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
        __ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock_shared()
    {
      unlock();
    }

    void* native_handle() { return &_M_rwlock; }
  };
#endif

#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  /// A shared mutex type implemented using std::condition_variable.
  class __shared_mutex_cv
  {
    friend class shared_timed_mutex;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority.  When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.
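    //
    // Illustrative walk-through (editorial note, not from the upstream
    // source): suppose two readers hold the lock, so _M_state == 2.  An
    // arriving writer sets _S_write_entered and blocks on gate2; any new
    // readers now block on gate1.  Each unlock_shared() decrements the
    // count, and the release that brings the count to zero signals gate2,
    // which admits the writer.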
    // Only locked when accessing _M_state or waiting on condition variables.
    mutex               _M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable  _M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable  _M_gate2;
    // The write-entered flag and reader count.
    unsigned            _M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    __shared_mutex_cv() : _M_state(0) {}

    ~__shared_mutex_cv()
    {
      __glibcxx_assert( _M_state == 0 );
    }

    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
        {
          _M_state = _S_write_entered;
          return true;
        }
      return false;
    }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
        return false;
      if (_M_state < _S_max_readers)
        {
          ++_M_state;
          return true;
        }
      return false;
    }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
        {
          // Wake the queued writer if there are no more readers.
          if (_M_readers() == 0)
            _M_gate2.notify_one();
          // No need to notify gate1 because we give priority to the queued
          // writer, and that writer will eventually notify gate1 after it
          // clears the write-entered flag.
        }
      else
        {
          // Wake any thread that was blocked on reader overflow.
          if (__prev == _S_max_readers)
            _M_gate1.notify_one();
        }
    }
  };
#endif
  /// @endcond

#if __cplusplus > 201402L
  /// The standard shared mutex type.
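  ///
  /// Illustrative usage (an editorial sketch, not part of the upstream
  /// header); assumes <mutex> is included for std::unique_lock:
  /// @code
  /// std::shared_mutex m;
  /// int value = 0;
  ///
  /// // Any number of threads may hold shared ownership concurrently.
  /// int read() { std::shared_lock<std::shared_mutex> lk(m); return value; }
  ///
  /// // A writer takes exclusive ownership, excluding readers and writers.
  /// void write(int v) { std::unique_lock<std::shared_mutex> lk(m); value = v; }
  /// @endcode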
  class shared_mutex
  {
  public:
    shared_mutex() = default;
    ~shared_mutex() = default;

    shared_mutex(const shared_mutex&) = delete;
    shared_mutex& operator=(const shared_mutex&) = delete;

    // Exclusive ownership

    void lock() { _M_impl.lock(); }
    bool try_lock() { return _M_impl.try_lock(); }
    void unlock() { _M_impl.unlock(); }

    // Shared ownership

    void lock_shared() { _M_impl.lock_shared(); }
    bool try_lock_shared() { return _M_impl.try_lock_shared(); }
    void unlock_shared() { _M_impl.unlock_shared(); }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
    typedef void* native_handle_type;
    native_handle_type native_handle() { return _M_impl.native_handle(); }

  private:
    __shared_mutex_pthread _M_impl;
#else
  private:
    __shared_mutex_cv _M_impl;
#endif
  };
#endif // C++17

  /// @cond undocumented
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif
  /// @endcond

  /// The standard shared timed mutex type.
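  ///
  /// Illustrative usage (an editorial sketch, not part of the upstream
  /// header): the timed members bound how long a thread will wait.
  /// @code
  /// std::shared_timed_mutex m;
  ///
  /// bool try_read_for_50ms()
  /// {
  ///   std::shared_lock<std::shared_timed_mutex> lk(m, std::chrono::milliseconds(50));
  ///   return lk.owns_lock();   // false if the deadline passed first
  /// }
  /// @endcode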
  class shared_timed_mutex
  : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    using __clock_t = chrono::steady_clock;
#else
    using __clock_t = chrono::system_clock;
#endif

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
        // duration_cast truncates, so when the native clock's tick is
        // coarser than _Period round up to avoid waking before the
        // requested timeout has expired.
        if (ratio_greater<__clock_t::period, _Period>())
          ++__rt;
        return try_lock_until(__clock_t::now() + __rt);
      }

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
        // As in try_lock_for, round up so we never time out early.
        if (ratio_greater<__clock_t::period, _Period>())
          ++__rt;
        return try_lock_shared_until(__clock_t::now() + __rt);
      }
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK

    // Exclusive ownership

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<chrono::system_clock,
                     _Duration>& __atime)
      {
        // Convert the absolute time_point to the seconds/nanoseconds
        // representation expected by the POSIX timed lock functions.
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<chrono::steady_clock,
                     _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = pthread_rwlock_clockwrlock(&_M_rwlock, CLOCK_MONOTONIC,
                                               &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }
#endif

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
#if __cplusplus > 201703L
        static_assert(chrono::is_clock_v<_Clock>);
#endif
        // The user-supplied clock may not tick at the same rate as
        // steady_clock, so we must loop in order to guarantee that
        // the timeout has expired before returning false.
        typename _Clock::time_point __now = _Clock::now();
        do {
          auto __rtime = __atime - __now;
          if (try_lock_for(__rtime))
            return true;
          __now = _Clock::now();
        } while (__atime > __now);
        return false;
      }
    // Shared ownership

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<chrono::system_clock,
                            _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret;
        // Unlike for lock(), we are not allowed to throw an exception so if
        // the maximum number of read locks has been exceeded, or we would
        // deadlock, we just try to acquire the lock again (and will time out
        // eventually).
        // In cases where we would exceed the maximum number of read locks
        // throughout the whole time until the timeout, we will fail to
        // acquire the lock even if it would be logically free; however, this
        // is allowed by the standard, and we made a "strong effort"
        // (see C++14 30.4.1.4p26).
        // For cases where the implementation detects a deadlock we
        // intentionally block and timeout so that an early return isn't
        // mistaken for a spurious failure, which might help users realise
        // there is a deadlock.
        do
          __ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
        while (__ret == EAGAIN || __ret == EDEADLK);
        if (__ret == ETIMEDOUT)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<chrono::steady_clock,
                            _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = pthread_rwlock_clockrdlock(&_M_rwlock, CLOCK_MONOTONIC,
                                               &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }
#endif

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                            _Duration>& __atime)
      {
#if __cplusplus > 201703L
        static_assert(chrono::is_clock_v<_Clock>);
#endif
        // The user-supplied clock may not tick at the same rate as
        // steady_clock, so we must loop in order to guarantee that
        // the timeout has expired before returning false.
        typename _Clock::time_point __now = _Clock::now();
        do {
          auto __rtime = __atime - __now;
          if (try_lock_shared_for(__rtime))
            return true;
          __now = _Clock::now();
        } while (__atime > __now);
        return false;
      }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Exclusive ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return !_M_write_entered(); }))
          {
            return false;
          }
        _M_state |= _S_write_entered;
        if (!_M_gate2.wait_until(__lk, __abs_time,
                                 [=]{ return _M_readers() == 0; }))
          {
            _M_state ^= _S_write_entered;
            // Wake all threads blocked while the write-entered flag was set.
            _M_gate1.notify_all();
            return false;
          }
        return true;
      }

    // Shared ownership

    template <typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                            _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return _M_state < _S_max_readers; }))
          {
            return false;
          }
        ++_M_state;
        return true;
      }

#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
#endif // _GLIBCXX_HAS_GTHREADS

  /// shared_lock
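  ///
  /// Illustrative usage (an editorial sketch, not part of the upstream
  /// header): deferred locking followed by an explicit shared acquire.
  /// @code
  /// std::shared_timed_mutex m;
  /// std::shared_lock<std::shared_timed_mutex> lk(m, std::defer_lock);
  /// // lk.owns_lock() is false here; no ownership has been taken yet.
  /// lk.lock();   // blocks until shared ownership is acquired
  /// @endcode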
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m)
      : _M_pm(std::__addressof(__m)), _M_owns(true)
      { __m.lock_shared(); }

      shared_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_pm(std::__addressof(__m)), _M_owns(false) { }

      shared_lock(mutex_type& __m, try_to_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }

      shared_lock(mutex_type& __m, adopt_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
        shared_lock(mutex_type& __m,
                    const chrono::time_point<_Clock, _Duration>& __abs_time)
        : _M_pm(std::__addressof(__m)),
          _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
        shared_lock(mutex_type& __m,
                    const chrono::duration<_Rep, _Period>& __rel_time)
        : _M_pm(std::__addressof(__m)),
          _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
        if (_M_owns)
          _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
        shared_lock(std::move(__sl)).swap(*this);
        return *this;
      }

      void
      lock()
      {
        _M_lockable();
        _M_pm->lock_shared();
        _M_owns = true;
      }

      bool
      try_lock()
      {
        _M_lockable();
        return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
        bool
        try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
        }

      template<typename _Clock, typename _Duration>
        bool
        try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
        }

      void
      unlock()
      {
        if (!_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        _M_pm->unlock_shared();
        _M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
        std::swap(_M_pm, __u._M_pm);
        std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
        _M_owns = false;
        return std::exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      void
      _M_lockable() const
      {
        if (_M_pm == nullptr)
          __throw_system_error(int(errc::operation_not_permitted));
        if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
      }

      mutex_type* _M_pm;
      bool        _M_owns;
    };

  /// Swap specialization for shared_lock
  /// @relates shared_mutex
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++14

#endif // _GLIBCXX_SHARED_MUTEX