
arm_helium_utils.h

/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_helium_utils.h
 * Description:  Utility functions for Helium development
 *
 * $Date:        09. September 2019
 * $Revision:    V.1.5.1
 *
 * Target Processor: Cortex-M cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef _ARM_UTILS_HELIUM_H_
#define _ARM_UTILS_HELIUM_H_

/***************************************
Definitions available for MVEF and MVEI
***************************************/
#if defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI)

#define INACTIVELANE            0 /* inactive lane content */

#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) */
/***************************************
Definitions available for MVEF only
***************************************/
#if defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF)

__STATIC_FORCEINLINE float32_t vecAddAcrossF32Mve(float32x4_t in)
{
    float32_t acc;

    acc = vgetq_lane(in, 0) + vgetq_lane(in, 1) +
          vgetq_lane(in, 2) + vgetq_lane(in, 3);

    return acc;
}
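
/*
 * Example (illustrative sketch, not part of the original file): reducing the
 * vector accumulator of an MVE dot product to a scalar. The helper name
 * dotProductF32 is hypothetical and the length is assumed to be a multiple
 * of four.
 *
 *   static float32_t dotProductF32(const float32_t *pA, const float32_t *pB,
 *                                  uint32_t blkCnt)
 *   {
 *       float32x4_t vecAcc = vdupq_n_f32(0.0f);
 *       while (blkCnt > 0U)
 *       {
 *           vecAcc = vfmaq(vecAcc, vld1q(pA), vld1q(pB));   // acc += a * b
 *           pA += 4; pB += 4; blkCnt -= 4U;
 *       }
 *       return vecAddAcrossF32Mve(vecAcc);   // collapse the four partial sums
 *   }
 */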
/* newton initial guess */
#define INVSQRT_MAGIC_F32           0x5f3759df

#define INVSQRT_NEWTON_MVE_F32(invSqrt, xHalf, xStart)     \
{                                                          \
    float32x4_t tmp;                                       \
                                                           \
    /* tmp = xhalf * x * x */                              \
    tmp = vmulq(xStart, xStart);                           \
    tmp = vmulq(tmp, xHalf);                               \
    /* (1.5f - xhalf * x * x) */                           \
    tmp = vsubq(vdupq_n_f32(1.5f), tmp);                   \
    /* x = x*(1.5f-xhalf*x*x); */                          \
    invSqrt = vmulq(tmp, xStart);                          \
}
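
/*
 * Example (illustrative sketch, not part of the original file): a vectorized
 * fast inverse square root built from the magic constant and two
 * Newton-Raphson refinement steps. The helper name vApproxInvSqrtF32 is
 * hypothetical and assumes an MVE-enabled compiler providing the arm_mve.h
 * intrinsics used below.
 *
 *   __STATIC_FORCEINLINE float32x4_t vApproxInvSqrtF32(float32x4_t x)
 *   {
 *       float32x4_t xHalf = vmulq_n_f32(x, 0.5f);
 *       // bit-level initial guess: reinterpret as int, shift, subtract from magic
 *       int32x4_t   i     = vsubq_s32(vdupq_n_s32(INVSQRT_MAGIC_F32),
 *                                     vshrq_n_s32(vreinterpretq_s32_f32(x), 1));
 *       float32x4_t xStart = vreinterpretq_f32_s32(i);
 *       float32x4_t invSqrt;
 *       INVSQRT_NEWTON_MVE_F32(invSqrt, xHalf, xStart);    // first refinement
 *       INVSQRT_NEWTON_MVE_F32(invSqrt, xHalf, invSqrt);   // second refinement
 *       return invSqrt;
 *   }
 */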
#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) */

/***************************************
Definitions available for MVEI only
***************************************/
#if defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI)

#include "arm_common_tables.h"

/* The following functions are used to transpose matrices in the f32 and q31 cases */
__STATIC_INLINE arm_status arm_mat_trans_32bit_2x2_mve(
    uint32_t * pDataSrc,
    uint32_t * pDataDest)
{
    static const uint32x4_t vecOffs = { 0, 2, 1, 3 };
    /*
     *
     * | 0   1 |   =>   | 0   2 |
     * | 2   3 |        | 1   3 |
     *
     */
    uint32x4_t vecIn = vldrwq_u32((uint32_t const *)pDataSrc);

    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs, vecIn);

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_32bit_3x3_mve(
    uint32_t * pDataSrc,
    uint32_t * pDataDest)
{
    const uint32x4_t vecOffs1 = { 0, 3, 6, 1};
    const uint32x4_t vecOffs2 = { 4, 7, 2, 5};
    /*
     *
     *  | 0   1   2 |       | 0   3   6 |  4 x 32 flattened version | 0  3  6  1 |
     *  | 3   4   5 |  =>   | 1   4   7 |            =>             | 4  7  2  5 |
     *  | 6   7   8 |       | 2   5   8 |        (row major)        | 8  .  .  . |
     *
     */
    uint32x4_t vecIn1 = vldrwq_u32((uint32_t const *) pDataSrc);
    uint32x4_t vecIn2 = vldrwq_u32((uint32_t const *) &pDataSrc[4]);

    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs1, vecIn1);
    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs2, vecIn2);

    pDataDest[8] = pDataSrc[8];

    return (ARM_MATH_SUCCESS);
}
__STATIC_INLINE arm_status arm_mat_trans_32bit_4x4_mve(uint32_t * pDataSrc, uint32_t * pDataDest)
{
    /*
     * 4x4 Matrix transposition
     * is 4 x de-interleave operation
     *
     * 0   1   2   3       0   4   8   12
     * 4   5   6   7       1   5   9   13
     * 8   9   10  11      2   6   10  14
     * 12  13  14  15      3   7   11  15
     */

    uint32x4x4_t vecIn;

    vecIn = vld4q((uint32_t const *) pDataSrc);
    vstrwq(pDataDest, vecIn.val[0]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[1]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[2]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[3]);

    return (ARM_MATH_SUCCESS);
}
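
/*
 * Example (illustrative sketch, not part of the original file): the 2x2, 3x3
 * and 4x4 helpers work on any 32-bit element type, so float32_t data is
 * simply reinterpreted as uint32_t. The array names below are hypothetical.
 *
 *   float32_t matA[4*4];    // source, row-major 4x4
 *   float32_t matAT[4*4];   // destination, receives the transpose
 *
 *   (void)arm_mat_trans_32bit_4x4_mve((uint32_t *)matA, (uint32_t *)matAT);
 */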
__STATIC_INLINE arm_status arm_mat_trans_32bit_generic_mve(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint32_t  * pDataSrc,
    uint32_t  * pDataDest)
{
    uint32x4_t      vecOffs;
    uint32_t        i;
    uint32_t        blkCnt;
    uint32_t const *pDataC;
    uint32_t       *pDataDestR;
    uint32x4_t      vecIn;

    vecOffs = vidupq_u32((uint32_t)0, 1);
    vecOffs = vecOffs * srcCols;

    i = srcCols;
    do
    {
        pDataC = (uint32_t const *) pDataSrc;
        pDataDestR = pDataDest;

        blkCnt = srcRows >> 2;
        while (blkCnt > 0U)
        {
            vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs);
            vstrwq(pDataDestR, vecIn);
            pDataDestR += 4;
            pDataC = pDataC + srcCols * 4;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }

        /*
         * tail
         */
        blkCnt = srcRows & 3;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp32q(blkCnt);
            vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs);
            vstrwq_p(pDataDestR, vecIn, p0);
        }

        pDataSrc += 1;
        pDataDest += srcRows;
    }
    while (--i);

    return (ARM_MATH_SUCCESS);
}
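
/*
 * Example (illustrative sketch, not part of the original file): transposing a
 * non-square, row-major 3x5 q31 matrix into a 5x3 destination. The array
 * names below are hypothetical.
 *
 *   q31_t src[3*5];   // 3 rows x 5 columns
 *   q31_t dst[5*3];   // receives the 5 rows x 3 columns transpose
 *
 *   (void)arm_mat_trans_32bit_generic_mve(3, 5, (uint32_t *)src, (uint32_t *)dst);
 */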
#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q31_MVE)
__STATIC_INLINE q31x4_t FAST_VSQRT_Q31(q31x4_t vecIn)
{
    q63x2_t vecTmpLL;
    q31x4_t vecTmp0, vecTmp1;
    q31_t   scale;
    q63_t   tmp64;
    q31x4_t vecNrm, vecDst, vecIdx, vecSignBits;

    vecSignBits = vclsq(vecIn);
    vecSignBits = vbicq(vecSignBits, 1);
    /*
     * in = in << no_of_sign_bits;
     */
    vecNrm = vshlq(vecIn, vecSignBits);
    /*
     * index = in >> 24;
     */
    vecIdx = vecNrm >> 24;
    vecIdx = vecIdx << 1;

    vecTmp0 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, vecIdx);

    vecIdx = vecIdx + 1;

    vecTmp1 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, vecIdx);

    vecTmp1 = vqrdmulhq(vecTmp1, vecNrm);
    vecTmp0 = vecTmp0 - vecTmp1;
    vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0);
    vecTmp1 = vqrdmulhq(vecNrm, vecTmp1);
    vecTmp1 = vdupq_n_s32(0x18000000) - vecTmp1;
    vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1);
    vecTmpLL = vmullbq_int(vecNrm, vecTmp0);

    /*
     * scale elements 0, 2
     */
    scale = 26 + (vecSignBits[0] >> 1);
    tmp64 = asrl(vecTmpLL[0], scale);
    vecDst[0] = (q31_t) tmp64;

    scale = 26 + (vecSignBits[2] >> 1);
    tmp64 = asrl(vecTmpLL[1], scale);
    vecDst[2] = (q31_t) tmp64;

    vecTmpLL = vmulltq_int(vecNrm, vecTmp0);

    /*
     * scale elements 1, 3
     */
    scale = 26 + (vecSignBits[1] >> 1);
    tmp64 = asrl(vecTmpLL[0], scale);
    vecDst[1] = (q31_t) tmp64;

    scale = 26 + (vecSignBits[3] >> 1);
    tmp64 = asrl(vecTmpLL[1], scale);
    vecDst[3] = (q31_t) tmp64;

    /*
     * set negative values to 0
     */
    vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s32(vecIn, 0));

    return vecDst;
}
#endif
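
/*
 * Example (illustrative sketch, not part of the original file): element-wise
 * square root of four Q31 samples. Negative inputs come back as 0 because of
 * the final predicated vdupq_m. The pointer names are hypothetical.
 *
 *   q31x4_t in  = vld1q(pSrcQ31);       // pSrcQ31: four Q31 values
 *   q31x4_t out = FAST_VSQRT_Q31(in);   // out[i] ~= sqrt(in[i]) in Q31
 *   vst1q(pDstQ31, out);
 */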
#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q15_MVE)
__STATIC_INLINE q15x8_t FAST_VSQRT_Q15(q15x8_t vecIn)
{
    q31x4_t vecTmpLev, vecTmpLodd, vecSignL;
    q15x8_t vecTmp0, vecTmp1;
    q15x8_t vecNrm, vecDst, vecIdx, vecSignBits;

    vecDst = vuninitializedq_s16();

    vecSignBits = vclsq(vecIn);
    vecSignBits = vbicq(vecSignBits, 1);
    /*
     * in = in << no_of_sign_bits;
     */
    vecNrm = vshlq(vecIn, vecSignBits);

    vecIdx = vecNrm >> 8;
    vecIdx = vecIdx << 1;

    vecTmp0 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, vecIdx);

    vecIdx = vecIdx + 1;

    vecTmp1 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, vecIdx);

    vecTmp1 = vqrdmulhq(vecTmp1, vecNrm);
    vecTmp0 = vecTmp0 - vecTmp1;
    vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0);
    vecTmp1 = vqrdmulhq(vecNrm, vecTmp1);
    vecTmp1 = vdupq_n_s16(0x1800) - vecTmp1;
    vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1);

    vecSignBits = vecSignBits >> 1;

    vecTmpLev  = vmullbq_int(vecNrm, vecTmp0);
    vecTmpLodd = vmulltq_int(vecNrm, vecTmp0);

    vecTmp0 = vecSignBits + 10;
    /*
     * negate sign to apply register based vshl
     */
    vecTmp0 = -vecTmp0;

    /*
     * shift even elements
     */
    vecSignL = vmovlbq(vecTmp0);
    vecTmpLev = vshlq(vecTmpLev, vecSignL);
    /*
     * shift odd elements
     */
    vecSignL = vmovltq(vecTmp0);
    vecTmpLodd = vshlq(vecTmpLodd, vecSignL);

    /*
     * merge and narrow odd and even parts
     */
    vecDst = vmovnbq_s32(vecDst, vecTmpLev);
    vecDst = vmovntq_s32(vecDst, vecTmpLodd);

    /*
     * set negative values to 0
     */
    vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s16(vecIn, 0));

    return vecDst;
}
#endif
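
/*
 * Example (illustrative sketch, not part of the original file): the Q15
 * variant works the same way, on eight Q15 lanes per call. The pointer names
 * are hypothetical.
 *
 *   q15x8_t in  = vld1q(pSrcQ15);       // pSrcQ15: eight Q15 values
 *   q15x8_t out = FAST_VSQRT_Q15(in);   // out[i] ~= sqrt(in[i]) in Q15
 *   vst1q(pDstQ15, out);
 */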
#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI) */
#endif /* _ARM_UTILS_HELIUM_H_ */