// -*- C++ -*- header.

// Copyright (C) 2008-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_base.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */
#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>
#include <bits/move.h>

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup atomics Atomics
   *
   * Components for performing atomic operations.
   * @{
   */
  /// Enumeration for memory_order
#if __cplusplus > 201703L
  enum class memory_order : int
    {
      relaxed,
      consume,
      acquire,
      release,
      acq_rel,
      seq_cst
    };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;
#endif
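
  // Illustrative note (not part of the original header): under the C++20
  // branch above, the legacy names are inline constexpr aliases for the
  // scoped enumerators, so either spelling names the same value and may be
  // passed to the functions below.  A minimal sketch of user code:
  //
  //   static_assert(std::memory_order_acquire == std::memory_order::acquire);
  //   std::atomic<int> x{0};                       // hypothetical variable
  //   x.load(std::memory_order::acquire);          // same as memory_order_acquire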
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) | int(__mod));
  }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) & int(__mod));
  }

  // Drop release ordering as per [atomics.types.operations.req]/21
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }
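
  // Illustrative note (not part of the original header): the mapping above
  // derives a failure ordering from a single success ordering, e.g.
  //
  //   __cmpexch_failure_order(memory_order_acq_rel) == memory_order_acquire
  //   __cmpexch_failure_order(memory_order_release) == memory_order_relaxed
  //
  // so the failure case never requests release semantics, which a failed
  // compare-exchange (a pure load) could not provide.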
  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }
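
  // Illustrative sketch (not part of the original header): a release fence
  // paired with an acquire fence lets relaxed atomic accesses carry the
  // synchronization.  The names data (plain int) and ready (std::atomic<bool>)
  // are hypothetical.
  //
  //   // thread 1
  //   data = 42;
  //   std::atomic_thread_fence(std::memory_order_release);
  //   ready.store(true, std::memory_order_relaxed);
  //
  //   // thread 2
  //   while (!ready.load(std::memory_order_relaxed))
  //     { /* spin */ }
  //   std::atomic_thread_fence(std::memory_order_acquire);
  //   assert(data == 42);   // guaranteed once the loop exits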
  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

#if __cplusplus <= 201703L
# define _GLIBCXX20_INIT(I)
#else
# define __cpp_lib_atomic_value_initialization 201911L
# define _GLIBCXX20_INIT(I) = I
#endif

#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  /**
   *  @brief Base type for atomic_flag.
   *
   *  Base type is POD with data, allowing atomic_flag to derive from
   *  it and meet the standard layout type requirement. In addition to
   *  compatibility with a C interface, this allows different
   *  implementations of atomic_flag to use the same atomic operation
   *  functions, via a standard conversion to the __atomic_flag_base
   *  argument.
   */
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
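
  // Illustrative sketch (not part of the original header): atomic_flag as a
  // minimal spinlock, acquiring on test_and_set and releasing on clear.
  //
  //   std::atomic_flag lock = ATOMIC_FLAG_INIT;
  //
  //   void critical_section()      // hypothetical user function
  //   {
  //     while (lock.test_and_set(std::memory_order_acquire))
  //       { /* spin until the previous value was clear */ }
  //     // ... exclusive access ...
  //     lock.clear(std::memory_order_release);
  //   }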
  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char8_t  char8_t
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
                                     reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
                                     reinterpret_cast<void *>(-_S_alignment));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
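
  // Illustrative sketch (not part of the original header): std::atomic for
  // integral types builds on the base class above.  A typical
  // compare_exchange_weak retry loop over such an object looks like:
  //
  //   std::atomic<int> counter{1};         // hypothetical user variable
  //   int expected = counter.load(std::memory_order_relaxed);
  //   while (!counter.compare_exchange_weak(expected, expected * 2,
  //                                         std::memory_order_acq_rel,
  //                                         std::memory_order_relaxed))
  //     { /* expected now holds the current value; the loop recomputes */ }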
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
                                     reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
                                     reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };
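
  // Illustrative note (not part of the original header): _M_type_size scales
  // distances by sizeof(_PTp), so atomic pointer arithmetic steps in whole
  // objects, exactly like built-in pointer arithmetic.
  //
  //   int array[4] = {0, 1, 2, 3};
  //   std::atomic<int*> p{array};          // hypothetical user variable
  //   p.fetch_add(2);                      // advances by 2 * sizeof(int) bytes
  //   assert(p.load() == array + 2);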
#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = remove_volatile_t<_Tp>;

    // As above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;

    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_load(__ptr, __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
                            _Val<_Tp> __desired, memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), true,
                                         int(__success), int(__failure));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
                              _Val<_Tp> __desired, memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), false,
                                         int(__success), int(__failure));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _Tp
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __newval;
      }

    template<typename _Tp>
      _Tp
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __newval;
      }
  } // namespace __atomic_impl
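
  // Illustrative note (not part of the original header): __fetch_add_flt and
  // friends emulate floating-point read-modify-write with a
  // compare_exchange_weak loop, since there is no dedicated builtin.  The same
  // pattern, written against the public std::atomic_ref interface:
  //
  //   double total = 0.0;                  // hypothetical user variable
  //   std::atomic_ref<double> ref(total);
  //   double old = ref.load(std::memory_order_relaxed);
  //   while (!ref.compare_exchange_weak(old, old + 1.5,
  //                                     std::memory_order_seq_cst,
  //                                     std::memory_order_relaxed))
  //     { /* old was refreshed with the current value; recompute and retry */ }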
  // base class for atomic<floating-point-type>
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;

      _Fp
      operator=(_Fp __t) volatile noexcept
      {
        this->store(__t);
        return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
        this->store(__t);
        return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
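
  // Illustrative sketch (not part of the original header): __atomic_float is
  // the base used by std::atomic<float/double/long double> in C++20, which
  // therefore supports fetch_add/fetch_sub and the compound operators:
  //
  //   std::atomic<double> sum{0.0};        // hypothetical user variable
  //   sum.fetch_add(2.5);                  // seq_cst; returns the old value
  //   sum += 1.0;                          // returns the new value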
#undef _GLIBCXX20_INIT

  template<typename _Tp,
           bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;

  // base class for non-integral, non-floating-point, non-pointer types
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
        = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
          ? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
      const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

    private:
      _Tp* _M_ptr;
    };

  // base class for atomic_ref<integral-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

    public:
      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
      }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };

  // base class for atomic_ref<floating-point-type>
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Fp
      operator=(_Fp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Fp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
      }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };

  // base class for atomic_ref<pointer-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
    public:
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp*
      operator=(_Tp* __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp*() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
      }

      void
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp*
      exchange(_Tp* __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }

      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator--() const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator+=(difference_type __d) const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
      }

      value_type
      operator-=(difference_type __d) const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
      }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
        static_assert(is_object_v<_Tp>);
        return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };
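
  // Illustrative sketch (not part of the original header): these __atomic_ref
  // partial specializations underlie std::atomic_ref, which provides atomic
  // access to a suitably aligned non-atomic object for as long as any
  // atomic_ref to it exists:
  //
  //   long value = 0;                       // hypothetical user variable
  //   {
  //     std::atomic_ref<long> ref(value);   // access only through refs from here on
  //     ref.fetch_add(1, std::memory_order_relaxed);
  //   }
  //   // once no atomic_ref refers to value, plain access is allowed again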
#endif // C++2a

  // @} group atomics

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif