Vous ne pouvez pas sélectionner plus de 25 sujets Les noms de sujets doivent commencer par une lettre ou un nombre, peuvent contenir des tirets ('-') et peuvent comporter jusqu'à 35 caractères.

678 lines
21KB

  1. // The template and inlines for the -*- C++ -*- internal _Array helper class.
  2. // Copyright (C) 1997-2020 Free Software Foundation, Inc.
  3. //
  4. // This file is part of the GNU ISO C++ Library. This library is free
  5. // software; you can redistribute it and/or modify it under the
  6. // terms of the GNU General Public License as published by the
  7. // Free Software Foundation; either version 3, or (at your option)
  8. // any later version.
  9. // This library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU General Public License for more details.
  13. // Under Section 7 of GPL version 3, you are granted additional
  14. // permissions described in the GCC Runtime Library Exception, version
  15. // 3.1, as published by the Free Software Foundation.
  16. // You should have received a copy of the GNU General Public License and
  17. // a copy of the GCC Runtime Library Exception along with this program;
  18. // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
  19. // <http://www.gnu.org/licenses/>.
  20. /** @file bits/valarray_array.h
  21. * This is an internal header file, included by other library headers.
  22. * Do not attempt to use it directly. @headername{valarray}
  23. */
  24. // Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr>
  25. #ifndef _VALARRAY_ARRAY_H
  26. #define _VALARRAY_ARRAY_H 1
  27. #pragma GCC system_header
  28. #include <bits/c++config.h>
  29. #include <bits/cpp_type_traits.h>
  30. #include <cstdlib>
  31. #include <new>
  32. namespace std _GLIBCXX_VISIBILITY(default)
  33. {
  34. _GLIBCXX_BEGIN_NAMESPACE_VERSION
  35. //
  36. // Helper functions on raw pointers
  37. //
// Allocate raw, uninitialized storage for __n objects of type _Tp.
// Declared first with __attribute__((__malloc__)) so the compiler can
// assume the returned pointer aliases nothing else.
// NOTE(review): __n * sizeof(_Tp) is not checked for overflow here;
// presumably the valarray callers guarantee a sane __n -- confirm.
template<typename _Tp>
  _Tp*
  __valarray_get_storage(size_t) __attribute__((__malloc__));

template<typename _Tp>
  inline _Tp*
  __valarray_get_storage(size_t __n)
  { return static_cast<_Tp*>(operator new(__n * sizeof(_Tp))); }
// Return memory obtained from __valarray_get_storage() to the system.
// Does not run destructors; see __valarray_destroy_elements for that.
inline void
__valarray_release_memory(void* __p)
{ operator delete(__p); }
  50. // Turn a raw-memory into an array of _Tp filled with _Tp()
  51. // This is required in 'valarray<T> v(n);'
  52. template<typename _Tp, bool>
  53. struct _Array_default_ctor
  54. {
  55. // Please note that this isn't exception safe. But
  56. // valarrays aren't required to be exception safe.
  57. inline static void
  58. _S_do_it(_Tp* __b, _Tp* __e)
  59. {
  60. while (__b != __e)
  61. new(__b++) _Tp();
  62. }
  63. };
  64. template<typename _Tp>
  65. struct _Array_default_ctor<_Tp, true>
  66. {
  67. // For fundamental types, it suffices to say 'memset()'
  68. inline static void
  69. _S_do_it(_Tp* __b, _Tp* __e)
  70. { __builtin_memset(__b, 0, (__e - __b) * sizeof(_Tp)); }
  71. };
// Value-initialize the raw storage [__b, __e), dispatching on whether
// _Tp is a scalar (memset) or a class type (placement-new loop).
// __is_scalar comes from <bits/cpp_type_traits.h>.
template<typename _Tp>
  inline void
  __valarray_default_construct(_Tp* __b, _Tp* __e)
  {
    _Array_default_ctor<_Tp, __is_scalar<_Tp>::__value>::_S_do_it(__b, __e);
  }
  78. // Turn a raw-memory into an array of _Tp filled with __t
  79. // This is the required in valarray<T> v(n, t). Also
  80. // used in valarray<>::resize().
  81. template<typename _Tp, bool>
  82. struct _Array_init_ctor
  83. {
  84. // Please note that this isn't exception safe. But
  85. // valarrays aren't required to be exception safe.
  86. inline static void
  87. _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
  88. {
  89. while (__b != __e)
  90. new(__b++) _Tp(__t);
  91. }
  92. };
  93. template<typename _Tp>
  94. struct _Array_init_ctor<_Tp, true>
  95. {
  96. inline static void
  97. _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
  98. {
  99. while (__b != __e)
  100. *__b++ = __t;
  101. }
  102. };
// Fill the raw storage [__b, __e) with copies of __t, dispatching on
// triviality of _Tp (assignment vs. placement new).
template<typename _Tp>
  inline void
  __valarray_fill_construct(_Tp* __b, _Tp* __e, const _Tp __t)
  {
    _Array_init_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __t);
  }
  109. //
  110. // copy-construct raw array [__o, *) from plain array [__b, __e)
  111. // We can't just say 'memcpy()'
  112. //
  113. template<typename _Tp, bool>
  114. struct _Array_copy_ctor
  115. {
  116. // Please note that this isn't exception safe. But
  117. // valarrays aren't required to be exception safe.
  118. inline static void
  119. _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
  120. {
  121. while (__b != __e)
  122. new(__o++) _Tp(*__b++);
  123. }
  124. };
  125. template<typename _Tp>
  126. struct _Array_copy_ctor<_Tp, true>
  127. {
  128. inline static void
  129. _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
  130. {
  131. if (__b)
  132. __builtin_memcpy(__o, __b, (__e - __b) * sizeof(_Tp));
  133. }
  134. };
// Copy-construct the raw array at __o from the plain array [__b, __e),
// dispatching on triviality of _Tp.
template<typename _Tp>
  inline void
  __valarray_copy_construct(const _Tp* __b, const _Tp* __e,
                            _Tp* __restrict__ __o)
  {
    _Array_copy_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __o);
  }
  142. // copy-construct raw array [__o, *) from strided array __a[<__n : __s>]
  143. template<typename _Tp>
  144. inline void
  145. __valarray_copy_construct (const _Tp* __restrict__ __a, size_t __n,
  146. size_t __s, _Tp* __restrict__ __o)
  147. {
  148. if (__is_trivial(_Tp))
  149. while (__n--)
  150. {
  151. *__o++ = *__a;
  152. __a += __s;
  153. }
  154. else
  155. while (__n--)
  156. {
  157. new(__o++) _Tp(*__a);
  158. __a += __s;
  159. }
  160. }
  161. // copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]]
  162. template<typename _Tp>
  163. inline void
  164. __valarray_copy_construct (const _Tp* __restrict__ __a,
  165. const size_t* __restrict__ __i,
  166. _Tp* __restrict__ __o, size_t __n)
  167. {
  168. if (__is_trivial(_Tp))
  169. while (__n--)
  170. *__o++ = __a[*__i++];
  171. else
  172. while (__n--)
  173. new (__o++) _Tp(__a[*__i++]);
  174. }
  175. // Do the necessary cleanup when we're done with arrays.
  176. template<typename _Tp>
  177. inline void
  178. __valarray_destroy_elements(_Tp* __b, _Tp* __e)
  179. {
  180. if (!__is_trivial(_Tp))
  181. while (__b != __e)
  182. {
  183. __b->~_Tp();
  184. ++__b;
  185. }
  186. }
  187. // Fill a plain array __a[<__n>] with __t
  188. template<typename _Tp>
  189. inline void
  190. __valarray_fill(_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
  191. {
  192. while (__n--)
  193. *__a++ = __t;
  194. }
  195. // fill strided array __a[<__n-1 : __s>] with __t
  196. template<typename _Tp>
  197. inline void
  198. __valarray_fill(_Tp* __restrict__ __a, size_t __n,
  199. size_t __s, const _Tp& __t)
  200. {
  201. for (size_t __i = 0; __i < __n; ++__i, __a += __s)
  202. *__a = __t;
  203. }
  204. // fill indirect array __a[__i[<__n>]] with __i
  205. template<typename _Tp>
  206. inline void
  207. __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
  208. size_t __n, const _Tp& __t)
  209. {
  210. for (size_t __j = 0; __j < __n; ++__j, ++__i)
  211. __a[*__i] = __t;
  212. }
  213. // copy plain array __a[<__n>] in __b[<__n>]
  214. // For non-fundamental types, it is wrong to say 'memcpy()'
  215. template<typename _Tp, bool>
  216. struct _Array_copier
  217. {
  218. inline static void
  219. _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
  220. {
  221. while(__n--)
  222. *__b++ = *__a++;
  223. }
  224. };
  225. template<typename _Tp>
  226. struct _Array_copier<_Tp, true>
  227. {
  228. inline static void
  229. _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
  230. {
  231. if (__n != 0)
  232. __builtin_memcpy(__b, __a, __n * sizeof (_Tp));
  233. }
  234. };
// Copy the plain array __a[<__n>] into the plain array __b[<__n>],
// dispatching on triviality of _Tp (fixed comment: "play" -> "plain").
template<typename _Tp>
  inline void
  __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                  _Tp* __restrict__ __b)
  {
    _Array_copier<_Tp, __is_trivial(_Tp)>::_S_do_it(__a, __n, __b);
  }
  243. // Copy strided array __a[<__n : __s>] in plain __b[<__n>]
  244. template<typename _Tp>
  245. inline void
  246. __valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s,
  247. _Tp* __restrict__ __b)
  248. {
  249. for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s)
  250. *__b = *__a;
  251. }
  252. // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
  253. template<typename _Tp>
  254. inline void
  255. __valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b,
  256. size_t __n, size_t __s)
  257. {
  258. for (size_t __i = 0; __i < __n; ++__i, ++__a, __b += __s)
  259. *__b = *__a;
  260. }
  261. // Copy strided array __src[<__n : __s1>] into another
  262. // strided array __dst[< : __s2>]. Their sizes must match.
  263. template<typename _Tp>
  264. inline void
  265. __valarray_copy(const _Tp* __restrict__ __src, size_t __n, size_t __s1,
  266. _Tp* __restrict__ __dst, size_t __s2)
  267. {
  268. for (size_t __i = 0; __i < __n; ++__i)
  269. __dst[__i * __s2] = __src[__i * __s1];
  270. }
  271. // Copy an indexed array __a[__i[<__n>]] in plain array __b[<__n>]
  272. template<typename _Tp>
  273. inline void
  274. __valarray_copy(const _Tp* __restrict__ __a,
  275. const size_t* __restrict__ __i,
  276. _Tp* __restrict__ __b, size_t __n)
  277. {
  278. for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i)
  279. *__b = __a[*__i];
  280. }
  281. // Copy a plain array __a[<__n>] in an indexed array __b[__i[<__n>]]
  282. template<typename _Tp>
  283. inline void
  284. __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
  285. _Tp* __restrict__ __b, const size_t* __restrict__ __i)
  286. {
  287. for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i)
  288. __b[*__i] = *__a;
  289. }
  290. // Copy the __n first elements of an indexed array __src[<__i>] into
  291. // another indexed array __dst[<__j>].
  292. template<typename _Tp>
  293. inline void
  294. __valarray_copy(const _Tp* __restrict__ __src, size_t __n,
  295. const size_t* __restrict__ __i,
  296. _Tp* __restrict__ __dst, const size_t* __restrict__ __j)
  297. {
  298. for (size_t __k = 0; __k < __n; ++__k)
  299. __dst[*__j++] = __src[*__i++];
  300. }
  301. //
  302. // Compute the sum of elements in range [__f, __l) which must not be empty.
  303. // This is a naive algorithm. It suffers from cancelling.
  304. // In the future try to specialize for _Tp = float, double, long double
  305. // using a more accurate algorithm.
  306. //
  307. template<typename _Tp>
  308. inline _Tp
  309. __valarray_sum(const _Tp* __f, const _Tp* __l)
  310. {
  311. _Tp __r = *__f++;
  312. while (__f != __l)
  313. __r += *__f++;
  314. return __r;
  315. }
  316. // Compute the min/max of an array-expression
  317. template<typename _Ta>
  318. inline typename _Ta::value_type
  319. __valarray_min(const _Ta& __a)
  320. {
  321. size_t __s = __a.size();
  322. typedef typename _Ta::value_type _Value_type;
  323. _Value_type __r = __s == 0 ? _Value_type() : __a[0];
  324. for (size_t __i = 1; __i < __s; ++__i)
  325. {
  326. _Value_type __t = __a[__i];
  327. if (__t < __r)
  328. __r = __t;
  329. }
  330. return __r;
  331. }
  332. template<typename _Ta>
  333. inline typename _Ta::value_type
  334. __valarray_max(const _Ta& __a)
  335. {
  336. size_t __s = __a.size();
  337. typedef typename _Ta::value_type _Value_type;
  338. _Value_type __r = __s == 0 ? _Value_type() : __a[0];
  339. for (size_t __i = 1; __i < __s; ++__i)
  340. {
  341. _Value_type __t = __a[__i];
  342. if (__t > __r)
  343. __r = __t;
  344. }
  345. return __r;
  346. }
//
// Helper class _Array, first layer of valarray abstraction.
// All operations on valarray should be forwarded to this class
// whenever possible. -- gdr
//
// _Array is a thin wrapper around a raw pointer.  Only the
// (const _Tp*, size_t) constructor allocates storage (see its
// definition below); the other two wrap existing memory.
template<typename _Tp>
  struct _Array
  {
    // Wrap an existing pointer; no ownership is taken.
    explicit _Array(_Tp* const __restrict__);
    // Wrap the storage of an existing valarray; no ownership is taken.
    explicit _Array(const valarray<_Tp>&);
    // Allocate storage and copy-construct from the given plain array.
    _Array(const _Tp* __restrict__, size_t);

    // Pointer to the first element of the wrapped storage.
    _Tp* begin() const;

    _Tp* const __restrict__ _M_data;
  };
// Copy-construct plain array __b[<__n>] from indexed array
// __a[__i[<__n>]].  Thin forwarder unwrapping the _Array handles to
// the raw-pointer overload.
template<typename _Tp>
  inline void
  __valarray_copy_construct(_Array<_Tp> __a, _Array<size_t> __i,
                            _Array<_Tp> __b, size_t __n)
  { std::__valarray_copy_construct(__a._M_data, __i._M_data,
                                   __b._M_data, __n); }
// Copy-construct plain array __b[<__n>] from strided array
// __a[<__n : __s>].  Thin forwarder to the raw-pointer overload.
template<typename _Tp>
  inline void
  __valarray_copy_construct(_Array<_Tp> __a, size_t __n, size_t __s,
                            _Array<_Tp> __b)
  { std::__valarray_copy_construct(__a._M_data, __n, __s, __b._M_data); }
// Fill plain array __a[<__n>] with __t.  Forwarder to the
// raw-pointer overload.
template<typename _Tp>
  inline void
  __valarray_fill (_Array<_Tp> __a, size_t __n, const _Tp& __t)
  { std::__valarray_fill(__a._M_data, __n, __t); }
// Fill strided array __a[<__n : __s>] with __t.  Forwarder to the
// raw-pointer overload.
template<typename _Tp>
  inline void
  __valarray_fill(_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t)
  { std::__valarray_fill(__a._M_data, __n, __s, __t); }
// Fill indexed array __a[__i[<__n>]] with __t.  Forwarder to the
// raw-pointer overload.
template<typename _Tp>
  inline void
  __valarray_fill(_Array<_Tp> __a, _Array<size_t> __i,
                  size_t __n, const _Tp& __t)
  { std::__valarray_fill(__a._M_data, __i._M_data, __n, __t); }
// Copy plain array __a[<__n>] into plain array __b[<__n>] (fixed
// comment: "play" -> "plain").  Forwarder to the raw-pointer overload.
template<typename _Tp>
  inline void
  __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)
  { std::__valarray_copy(__a._M_data, __n, __b._M_data); }
// Copy strided array __a[<__n : __s>] into plain __b[<__n>].
// Forwarder to the raw-pointer overload.
template<typename _Tp>
  inline void
  __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b)
  { std::__valarray_copy(__a._M_data, __n, __s, __b._M_data); }
// Copy plain array __a[<__n>] into strided array __b[<__n : __s>].
// NOTE(review): unlike every sibling forwarder in this group, this
// call is NOT std::-qualified, so the callee is found by ordinary
// lookup/ADL at instantiation time.  Confirm whether the missing
// std:: qualification is intentional.
template<typename _Tp>
  inline void
  __valarray_copy(_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s)
  { __valarray_copy(__a._M_data, __b._M_data, __n, __s); }
// Copy strided array __src[<__n : __s1>] into strided array
// __dst[< : __s2>]; their sizes must match.  Forwarder to the
// raw-pointer overload.
template<typename _Tp>
  inline void
  __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s1,
                  _Array<_Tp> __b, size_t __s2)
  { std::__valarray_copy(__a._M_data, __n, __s1, __b._M_data, __s2); }
// Copy indexed array __a[__i[<__n>]] into plain array __b[<__n>].
// Forwarder to the raw-pointer overload.
template<typename _Tp>
  inline void
  __valarray_copy(_Array<_Tp> __a, _Array<size_t> __i,
                  _Array<_Tp> __b, size_t __n)
  { std::__valarray_copy(__a._M_data, __i._M_data, __b._M_data, __n); }
// Copy plain array __a[<__n>] into indexed array __b[__i[<__n>]].
// Forwarder to the raw-pointer overload.
template<typename _Tp>
  inline void
  __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b,
                  _Array<size_t> __i)
  { std::__valarray_copy(__a._M_data, __n, __b._M_data, __i._M_data); }
// Copy the first __n elements of indexed array __src[<__i>] into
// indexed array __dst[<__j>].  Forwarder to the raw-pointer overload.
template<typename _Tp>
  inline void
  __valarray_copy(_Array<_Tp> __src, size_t __n, _Array<size_t> __i,
                  _Array<_Tp> __dst, _Array<size_t> __j)
  {
    std::__valarray_copy(__src._M_data, __n, __i._M_data,
                         __dst._M_data, __j._M_data);
  }
// Wrap an existing pointer; _Array takes no ownership.
template<typename _Tp>
  inline
  _Array<_Tp>::_Array(_Tp* const __restrict__ __p)
  : _M_data (__p) {}
// Wrap the storage of an existing valarray (reads __v._M_data
// directly); _Array takes no ownership.
template<typename _Tp>
  inline
  _Array<_Tp>::_Array(const valarray<_Tp>& __v)
  : _M_data (__v._M_data) {}
  439. template<typename _Tp>
  440. inline
  441. _Array<_Tp>::_Array(const _Tp* __restrict__ __b, size_t __s)
  442. : _M_data(__valarray_get_storage<_Tp>(__s))
  443. { std::__valarray_copy_construct(__b, __s, _M_data); }
// Pointer to the first element of the wrapped storage.
template<typename _Tp>
  inline _Tp*
  _Array<_Tp>::begin () const
  { return _M_data; }
// _DEFINE_ARRAY_FUNCTION(_Op, _Name) stamps out the family of
// compound-assignment helpers '_Array_augmented_<_Name>' for one
// operator _Op (e.g. '+' / '__plus' produces '*__p += ...').  The
// overloads cover every combination of destination (plain, strided,
// indexed, masked) and source (scalar, plain array, strided array,
// indexed array, masked array, expression template _Expr<_Dom, _Tp>).
// Comments inside the macro use /* */ so they survive line splicing.
#define _DEFINE_ARRAY_FUNCTION(_Op, _Name) \
  /* __a[<__n>] _Op= __t : scalar right-hand side. */ \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t) \
    { \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p) \
        *__p _Op##= __t; \
    } \
 \
  /* __a[<__n>] _Op= __b[<__n>] : plain op plain. */ \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \
    { \
      _Tp* __p = __a._M_data; \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q) \
        *__p _Op##= *__q; \
    } \
 \
  /* __a[<__n>] _Op= __e[<__n>] : expression-template right-hand side. */ \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__p) \
        *__p _Op##= __e[__i]; \
    } \
 \
  /* __a[<__n : __s>] _Op= __b[<__n>] : strided lhs, plain rhs. */ \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s, \
                             _Array<_Tp> __b) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __s * __n; \
           __p += __s, ++__q) \
        *__p _Op##= *__q; \
    } \
 \
  /* __a[<__n>] _Op= __b[<__n : __s>] : plain lhs, strided rhs. */ \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b, \
                             size_t __n, size_t __s) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, __q += __s) \
        *__p _Op##= *__q; \
    } \
 \
  /* __a[<__n : __s>] _Op= __e[<__n>] : strided lhs, expression rhs. */ \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __s, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, __p += __s) \
        *__p _Op##= __e[__i]; \
    } \
 \
  /* __a[__i[<__n>]] _Op= __b[<__n>] : indexed lhs, plain rhs. */ \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      _Tp* __q(__b._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
           ++__j, ++__q) \
        __a._M_data[*__j] _Op##= *__q; \
    } \
 \
  /* __a[<__n>] _Op= __b[__i[<__n>]] : plain lhs, indexed rhs. */ \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<size_t> __i) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t* __j = __i._M_data; __j<__i._M_data + __n; \
           ++__j, ++__p) \
        *__p _Op##= __b._M_data[*__j]; \
    } \
 \
  /* __a[__i[<__n>]] _Op= __e[<__n>] : indexed lhs, expression rhs. */ \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      size_t* __j(__i._M_data); \
      for (size_t __k = 0; __k<__n; ++__k, ++__j) \
        __a._M_data[*__j] _Op##= __e[__k]; \
    } \
 \
  /* Masked lhs _Op= plain rhs: __p is advanced past false mask \
     entries before each store, so only true positions are written. */ \
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; \
           ++__q, ++__ok, ++__p) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
 \
  /* Plain lhs _Op= masked rhs: __q skips false mask entries. */ \
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<bool> __m) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, ++__ok, ++__q) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__q; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
 \
  /* Masked lhs _Op= expression rhs: __p skips false mask entries. */ \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= __e[__i]; \
        } \
    }
// Instantiate the augmented-assignment helper family for every
// compound-assignment operator valarray supports, then retire the
// generator macro.
_DEFINE_ARRAY_FUNCTION(+, __plus)
_DEFINE_ARRAY_FUNCTION(-, __minus)
_DEFINE_ARRAY_FUNCTION(*, __multiplies)
_DEFINE_ARRAY_FUNCTION(/, __divides)
_DEFINE_ARRAY_FUNCTION(%, __modulus)
_DEFINE_ARRAY_FUNCTION(^, __bitwise_xor)
_DEFINE_ARRAY_FUNCTION(|, __bitwise_or)
_DEFINE_ARRAY_FUNCTION(&, __bitwise_and)
_DEFINE_ARRAY_FUNCTION(<<, __shift_left)
_DEFINE_ARRAY_FUNCTION(>>, __shift_right)

#undef _DEFINE_ARRAY_FUNCTION
  606. _GLIBCXX_END_NAMESPACE_VERSION
  607. } // namespace
  608. # include <bits/valarray_array.tcc>
#endif /* _VALARRAY_ARRAY_H */