// <mutex> -*- C++ -*-

// Copyright (C) 2003-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_MUTEX
#define _GLIBCXX_MUTEX 1

#pragma GCC system_header

#if __cplusplus < 201103L
# include <bits/c++0x_warning.h>
#else

#include <tuple>
#include <chrono>
#include <exception>
#include <type_traits>
#include <system_error>
#include <bits/std_mutex.h>
#include <bits/unique_lock.h>
#if ! _GTHREAD_USE_MUTEX_TIMEDLOCK
# include <condition_variable>
# include <thread>
#endif
#ifndef _GLIBCXX_HAVE_TLS
# include <bits/std_function.h>
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup mutexes
   * @{
   */

#ifdef _GLIBCXX_HAS_GTHREADS

  // Common base class for std::recursive_mutex and std::recursive_timed_mutex
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t  __native_type;

    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    __native_type  _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    __native_type  _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };

  /// The standard recursive mutex type.
  class recursive_mutex : private __recursive_mutex_base
  {
  public:
    typedef __native_type* native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }
  };
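
  /* Illustrative usage sketch (example-only names, not defined by this
     header): a recursive_mutex may be acquired again by the thread that
     already owns it, so a locking function can call another locking
     function on the same mutex without deadlocking.

     @code
     #include <mutex>

     std::recursive_mutex m;        // example-only global

     void helper()
     { std::lock_guard<std::recursive_mutex> l(m); }

     void caller()
     {
       std::lock_guard<std::recursive_mutex> l(m);
       helper();                    // OK: same thread re-locks m
     }
     @endcode
  */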

#if _GTHREAD_USE_MUTEX_TIMEDLOCK
  template<typename _Derived>
    class __timed_mutex_impl
    {
    protected:
      template<typename _Rep, typename _Period>
        bool
        _M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
        {
#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
          using __clock = chrono::steady_clock;
#else
          using __clock = chrono::system_clock;
#endif

          auto __rt = chrono::duration_cast<__clock::duration>(__rtime);
          if (ratio_greater<__clock::period, _Period>())
            ++__rt;
          return _M_try_lock_until(__clock::now() + __rt);
        }

      template<typename _Duration>
        bool
        _M_try_lock_until(const chrono::time_point<chrono::system_clock,
                                                   _Duration>& __atime)
        {
          auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
          auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

          __gthread_time_t __ts = {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

          return static_cast<_Derived*>(this)->_M_timedlock(__ts);
        }

#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
      template<typename _Duration>
        bool
        _M_try_lock_until(const chrono::time_point<chrono::steady_clock,
                                                   _Duration>& __atime)
        {
          auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
          auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

          __gthread_time_t __ts = {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

          return static_cast<_Derived*>(this)->_M_clocklock(CLOCK_MONOTONIC,
                                                            __ts);
        }
#endif

      template<typename _Clock, typename _Duration>
        bool
        _M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
        {
#if __cplusplus > 201703L
          static_assert(chrono::is_clock_v<_Clock>);
#endif
          // The user-supplied clock may not tick at the same rate as
          // steady_clock, so we must loop in order to guarantee that
          // the timeout has expired before returning false.
          auto __now = _Clock::now();
          do {
            auto __rtime = __atime - __now;
            if (_M_try_lock_for(__rtime))
              return true;
            __now = _Clock::now();
          } while (__atime > __now);
          return false;
        }
    };

  /// The standard timed mutex type.
  class timed_mutex
  : private __mutex_base, public __timed_mutex_impl<timed_mutex>
  {
  public:
    typedef __native_type* native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

  private:
    friend class __timed_mutex_impl<timed_mutex>;

    bool
    _M_timedlock(const __gthread_time_t& __ts)
    { return !__gthread_mutex_timedlock(&_M_mutex, &__ts); }

#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
    bool
    _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
    { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
#endif
  };
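
  /* Illustrative usage sketch (example-only names): try_lock_for() blocks
     for at most the given relative time and reports whether the mutex was
     acquired, instead of blocking indefinitely like lock().

     @code
     #include <chrono>
     #include <mutex>

     std::timed_mutex tm;           // example-only global

     bool try_work()
     {
       if (!tm.try_lock_for(std::chrono::milliseconds(100)))
         return false;              // could not acquire within ~100ms
       // ... critical section ...
       tm.unlock();
       return true;
     }
     @endcode
  */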

  /// recursive_timed_mutex
  class recursive_timed_mutex
  : private __recursive_mutex_base,
    public __timed_mutex_impl<recursive_timed_mutex>
  {
  public:
    typedef __native_type* native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

  private:
    friend class __timed_mutex_impl<recursive_timed_mutex>;

    bool
    _M_timedlock(const __gthread_time_t& __ts)
    { return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); }

#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
    bool
    _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
    { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
#endif
  };

#else // !_GTHREAD_USE_MUTEX_TIMEDLOCK

  /// timed_mutex
  class timed_mutex
  {
    mutex               _M_mut;
    condition_variable  _M_cv;
    bool                _M_locked = false;

  public:
    timed_mutex() = default;
    ~timed_mutex() { __glibcxx_assert( !_M_locked ); }

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, [&]{ return !_M_locked; });
      _M_locked = true;
    }

    bool
    try_lock()
    {
      lock_guard<mutex> __lk(_M_mut);
      if (_M_locked)
        return false;
      _M_locked = true;
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_for(__lk, __rtime, [&]{ return !_M_locked; }))
          return false;
        _M_locked = true;
        return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_until(__lk, __atime, [&]{ return !_M_locked; }))
          return false;
        _M_locked = true;
        return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_locked );
      _M_locked = false;
      _M_cv.notify_one();
    }
  };

  /// recursive_timed_mutex
  class recursive_timed_mutex
  {
    mutex               _M_mut;
    condition_variable  _M_cv;
    thread::id          _M_owner;
    unsigned            _M_count = 0;

    // Predicate type that tests whether the current thread can lock a mutex.
    struct _Can_lock
    {
      // Returns true if the mutex is unlocked or is locked by _M_caller.
      bool
      operator()() const noexcept
      { return _M_mx->_M_count == 0 || _M_mx->_M_owner == _M_caller; }

      const recursive_timed_mutex* _M_mx;
      thread::id _M_caller;
    };

  public:
    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() { __glibcxx_assert( _M_count == 0 ); }

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, __can_lock);
      if (_M_count == -1u)
        __throw_system_error(EAGAIN); // [thread.timedmutex.recursive]/3
      _M_owner = __id;
      ++_M_count;
    }

    bool
    try_lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      lock_guard<mutex> __lk(_M_mut);
      if (!__can_lock())
        return false;
      if (_M_count == -1u)
        return false;
      _M_owner = __id;
      ++_M_count;
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        auto __id = this_thread::get_id();
        _Can_lock __can_lock{this, __id};
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_for(__lk, __rtime, __can_lock))
          return false;
        if (_M_count == -1u)
          return false;
        _M_owner = __id;
        ++_M_count;
        return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        auto __id = this_thread::get_id();
        _Can_lock __can_lock{this, __id};
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_until(__lk, __atime, __can_lock))
          return false;
        if (_M_count == -1u)
          return false;
        _M_owner = __id;
        ++_M_count;
        return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_owner == this_thread::get_id() );
      __glibcxx_assert( _M_count > 0 );
      if (--_M_count == 0)
        {
          _M_owner = {};
          _M_cv.notify_one();
        }
    }
  };

#endif
#endif // _GLIBCXX_HAS_GTHREADS

  /// @cond undocumented
  template<typename _Lock>
    inline unique_lock<_Lock>
    __try_to_lock(_Lock& __l)
    { return unique_lock<_Lock>{__l, try_to_lock}; }

  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
        static void
        __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
        {
          __idx = _Idx;
          auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              constexpr bool __cont = _Idx + 2 < sizeof...(_Lock);
              using __try_locker = __try_lock_impl<_Idx + 1, __cont>;
              __try_locker::__do_try_lock(__locks, __idx);
              if (__idx == -1)
                __lock.release();
            }
        }
    };

  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
        static void
        __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
        {
          __idx = _Idx;
          auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              __idx = -1;
              __lock.release();
            }
        }
    };
  /// @endcond

  /** @brief Generic try_lock.
   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
   *          a 0-based index corresponding to the argument that returned false.
   *  @post Either all arguments are locked, or none will be.
   *
   *  Sequentially calls try_lock() on each argument.
   */
  template<typename _Lock1, typename _Lock2, typename... _Lock3>
    int
    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
    {
      int __idx;
      auto __locks = std::tie(__l1, __l2, __l3...);
      __try_lock_impl<0>::__do_try_lock(__locks, __idx);
      return __idx;
    }
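
  /* Illustrative usage sketch (example-only names): std::try_lock() attempts
     each lockable in order; -1 means every try_lock() succeeded, otherwise
     the 0-based index of the first failure is returned and nothing stays
     locked.

     @code
     #include <mutex>

     std::mutex a, b;               // example-only globals

     bool try_update()
     {
       if (std::try_lock(a, b) != -1)
         return false;              // at least one mutex was busy
       // ... both a and b are held here ...
       a.unlock();
       b.unlock();
       return true;
     }
     @endcode
  */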

  /** @brief Generic lock.
   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
   *  @throw An exception thrown by an argument's lock() or try_lock() member.
   *  @post All arguments are locked.
   *
   *  All arguments are locked via a sequence of calls to lock(), try_lock()
   *  and unlock().  If the call exits via an exception any locks that were
   *  obtained will be released.
   */
  template<typename _L1, typename _L2, typename... _L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      while (true)
        {
          using __try_locker = __try_lock_impl<0, sizeof...(_L3) != 0>;
          unique_lock<_L1> __first(__l1);
          int __idx;
          auto __locks = std::tie(__l2, __l3...);
          __try_locker::__do_try_lock(__locks, __idx);
          if (__idx == -1)
            {
              __first.release();
              return;
            }
        }
    }
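
  /* Illustrative usage sketch (example-only names): std::lock() acquires
     several mutexes without deadlock regardless of the order other threads
     lock them in; adopt_lock then hands ownership to RAII guards for
     release.

     @code
     #include <mutex>

     std::mutex a, b;               // example-only globals

     void update_both()
     {
       std::lock(a, b);             // deadlock-free acquisition of both
       std::lock_guard<std::mutex> la(a, std::adopt_lock);
       std::lock_guard<std::mutex> lb(b, std::adopt_lock);
       // ... both held until the end of the scope ...
     }
     @endcode
  */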

#if __cplusplus >= 201703L
#define __cpp_lib_scoped_lock 201703

  /** @brief A scoped lock type for multiple lockable objects.
   *
   *  A scoped_lock controls mutex ownership within a scope, releasing
   *  ownership in the destructor.
   */
  template<typename... _MutexTypes>
    class scoped_lock
    {
    public:
      explicit scoped_lock(_MutexTypes&... __m) : _M_devices(std::tie(__m...))
      { std::lock(__m...); }

      explicit scoped_lock(adopt_lock_t, _MutexTypes&... __m) noexcept
      : _M_devices(std::tie(__m...))
      { } // calling thread owns mutex

      ~scoped_lock()
      { std::apply([](auto&... __m) { (__m.unlock(), ...); }, _M_devices); }

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;

    private:
      tuple<_MutexTypes&...> _M_devices;
    };

  template<>
    class scoped_lock<>
    {
    public:
      explicit scoped_lock() = default;
      explicit scoped_lock(adopt_lock_t) noexcept { }
      ~scoped_lock() = default;

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;
    };

  template<typename _Mutex>
    class scoped_lock<_Mutex>
    {
    public:
      using mutex_type = _Mutex;

      explicit scoped_lock(mutex_type& __m) : _M_device(__m)
      { _M_device.lock(); }

      explicit scoped_lock(adopt_lock_t, mutex_type& __m) noexcept
      : _M_device(__m)
      { } // calling thread owns mutex

      ~scoped_lock()
      { _M_device.unlock(); }

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;

    private:
      mutex_type& _M_device;
    };
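
  /* Illustrative usage sketch (example-only names): in C++17, scoped_lock
     combines the deadlock avoidance of std::lock() with RAII release of
     every mutex it owns.

     @code
     #include <mutex>

     std::mutex a, b;               // example-only globals

     void modify_pair()
     {
       std::scoped_lock lk(a, b);   // locks both; unlocks on scope exit
       // ... critical section ...
     }
     @endcode
  */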

#endif // C++17

#ifdef _GLIBCXX_HAS_GTHREADS

  /// Flag type used by std::call_once
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type  _M_once = __GTHREAD_ONCE_INIT;

  public:
    /// Constructor
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };

  /// @cond undocumented
#ifdef _GLIBCXX_HAVE_TLS
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();
#else
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();
#endif

  extern "C" void __once_proxy(void);
  /// @endcond

  /// Invoke a callable and synchronize with other calls using the same flag
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 2442. call_once() shouldn't DECAY_COPY()
      auto __callable = [&] {
        std::__invoke(std::forward<_Callable>(__f),
                      std::forward<_Args>(__args)...);
      };
#ifdef _GLIBCXX_HAVE_TLS
      __once_callable = std::__addressof(__callable);
      __once_call = []{ (*(decltype(__callable)*)__once_callable)(); };
#else
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      __once_functor = __callable;
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&__once._M_once, &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      if (__functor_lock)
        __set_once_functor_lock_ptr(0);
#endif

#ifdef __clang_analyzer__
      // PR libstdc++/82481
      __once_callable = nullptr;
      __once_call = nullptr;
#endif

      if (__e)
        __throw_system_error(__e);
    }
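
  /* Illustrative usage sketch (example-only names): call_once() runs the
     callable exactly once per once_flag, even when several threads race to
     perform the initialization.

     @code
     #include <mutex>

     std::once_flag init_flag;      // example-only globals
     int* table = nullptr;

     void ensure_init()
     { std::call_once(init_flag, []{ table = new int[256](); }); }
     @endcode
  */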

#endif // _GLIBCXX_HAS_GTHREADS

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++11

#endif // _GLIBCXX_MUTEX