Naze32 clone with FrSky receiver
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

core_cm4_simd.h 22KB

  1. /**************************************************************************//**
  2. * @file core_cm4_simd.h
  3. * @brief CMSIS Cortex-M4 SIMD Header File
  4. * @version V3.20
  5. * @date 25. February 2013
  6. *
  7. * @note
  8. *
  9. ******************************************************************************/
  10. /* Copyright (c) 2009 - 2013 ARM LIMITED
  11. All rights reserved.
  12. Redistribution and use in source and binary forms, with or without
  13. modification, are permitted provided that the following conditions are met:
  14. - Redistributions of source code must retain the above copyright
  15. notice, this list of conditions and the following disclaimer.
  16. - Redistributions in binary form must reproduce the above copyright
  17. notice, this list of conditions and the following disclaimer in the
  18. documentation and/or other materials provided with the distribution.
  19. - Neither the name of ARM nor the names of its contributors may be used
  20. to endorse or promote products derived from this software without
  21. specific prior written permission.
  22. *
  23. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  24. AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  25. IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  26. ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
  27. LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  28. CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  29. SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  30. INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  31. CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  32. ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  33. POSSIBILITY OF SUCH DAMAGE.
  34. ---------------------------------------------------------------------------*/
  35. #ifdef __cplusplus
  36. extern "C" {
  37. #endif
  38. #ifndef __CORE_CM4_SIMD_H
  39. #define __CORE_CM4_SIMD_H
  40. /*******************************************************************************
  41. * Hardware Abstraction Layer
  42. ******************************************************************************/
  43. /* ################### Compiler specific Intrinsics ########################### */
  44. /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  45. Access to dedicated SIMD instructions
  46. @{
  47. */
  48. #if defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
  49. /* ARM armcc specific functions */
  50. /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
  51. #define __SADD8 __sadd8
  52. #define __QADD8 __qadd8
  53. #define __SHADD8 __shadd8
  54. #define __UADD8 __uadd8
  55. #define __UQADD8 __uqadd8
  56. #define __UHADD8 __uhadd8
  57. #define __SSUB8 __ssub8
  58. #define __QSUB8 __qsub8
  59. #define __SHSUB8 __shsub8
  60. #define __USUB8 __usub8
  61. #define __UQSUB8 __uqsub8
  62. #define __UHSUB8 __uhsub8
  63. #define __SADD16 __sadd16
  64. #define __QADD16 __qadd16
  65. #define __SHADD16 __shadd16
  66. #define __UADD16 __uadd16
  67. #define __UQADD16 __uqadd16
  68. #define __UHADD16 __uhadd16
  69. #define __SSUB16 __ssub16
  70. #define __QSUB16 __qsub16
  71. #define __SHSUB16 __shsub16
  72. #define __USUB16 __usub16
  73. #define __UQSUB16 __uqsub16
  74. #define __UHSUB16 __uhsub16
  75. #define __SASX __sasx
  76. #define __QASX __qasx
  77. #define __SHASX __shasx
  78. #define __UASX __uasx
  79. #define __UQASX __uqasx
  80. #define __UHASX __uhasx
  81. #define __SSAX __ssax
  82. #define __QSAX __qsax
  83. #define __SHSAX __shsax
  84. #define __USAX __usax
  85. #define __UQSAX __uqsax
  86. #define __UHSAX __uhsax
  87. #define __USAD8 __usad8
  88. #define __USADA8 __usada8
  89. #define __SSAT16 __ssat16
  90. #define __USAT16 __usat16
  91. #define __UXTB16 __uxtb16
  92. #define __UXTAB16 __uxtab16
  93. #define __SXTB16 __sxtb16
  94. #define __SXTAB16 __sxtab16
  95. #define __SMUAD __smuad
  96. #define __SMUADX __smuadx
  97. #define __SMLAD __smlad
  98. #define __SMLADX __smladx
  99. #define __SMLALD __smlald
  100. #define __SMLALDX __smlaldx
  101. #define __SMUSD __smusd
  102. #define __SMUSDX __smusdx
  103. #define __SMLSD __smlsd
  104. #define __SMLSDX __smlsdx
  105. #define __SMLSLD __smlsld
  106. #define __SMLSLDX __smlsldx
  107. #define __SEL __sel
  108. #define __QADD __qadd
  109. #define __QSUB __qsub
  110. #define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
  111. ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
  112. #define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \
  113. ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
  114. #define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
  115. ((int64_t)(ARG3) << 32) ) >> 32))
  116. /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
  117. #elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
  118. /* IAR iccarm specific functions */
  119. /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
  120. #include <cmsis_iar.h>
  121. /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
  122. #elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
  123. /* TI CCS specific functions */
  124. /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
  125. #include <cmsis_ccs.h>
  126. /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
  127. #elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
  128. /* GNU gcc specific functions */
  129. /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
  130. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
  131. {
  132. uint32_t result;
  133. __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  134. return(result);
  135. }
  136. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
  137. {
  138. uint32_t result;
  139. __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  140. return(result);
  141. }
  142. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
  143. {
  144. uint32_t result;
  145. __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  146. return(result);
  147. }
  148. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
  149. {
  150. uint32_t result;
  151. __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  152. return(result);
  153. }
  154. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
  155. {
  156. uint32_t result;
  157. __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  158. return(result);
  159. }
  160. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
  161. {
  162. uint32_t result;
  163. __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  164. return(result);
  165. }
  166. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
  167. {
  168. uint32_t result;
  169. __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  170. return(result);
  171. }
  172. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
  173. {
  174. uint32_t result;
  175. __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  176. return(result);
  177. }
  178. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
  179. {
  180. uint32_t result;
  181. __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  182. return(result);
  183. }
  184. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
  185. {
  186. uint32_t result;
  187. __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  188. return(result);
  189. }
  190. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
  191. {
  192. uint32_t result;
  193. __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  194. return(result);
  195. }
  196. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
  197. {
  198. uint32_t result;
  199. __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  200. return(result);
  201. }
  202. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
  203. {
  204. uint32_t result;
  205. __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  206. return(result);
  207. }
  208. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
  209. {
  210. uint32_t result;
  211. __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  212. return(result);
  213. }
  214. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
  215. {
  216. uint32_t result;
  217. __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  218. return(result);
  219. }
  220. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
  221. {
  222. uint32_t result;
  223. __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  224. return(result);
  225. }
  226. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
  227. {
  228. uint32_t result;
  229. __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  230. return(result);
  231. }
  232. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
  233. {
  234. uint32_t result;
  235. __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  236. return(result);
  237. }
  238. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
  239. {
  240. uint32_t result;
  241. __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  242. return(result);
  243. }
  244. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
  245. {
  246. uint32_t result;
  247. __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  248. return(result);
  249. }
  250. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
  251. {
  252. uint32_t result;
  253. __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  254. return(result);
  255. }
  256. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
  257. {
  258. uint32_t result;
  259. __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  260. return(result);
  261. }
  262. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
  263. {
  264. uint32_t result;
  265. __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  266. return(result);
  267. }
  268. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
  269. {
  270. uint32_t result;
  271. __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  272. return(result);
  273. }
  274. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
  275. {
  276. uint32_t result;
  277. __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  278. return(result);
  279. }
  280. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
  281. {
  282. uint32_t result;
  283. __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  284. return(result);
  285. }
  286. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
  287. {
  288. uint32_t result;
  289. __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  290. return(result);
  291. }
  292. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
  293. {
  294. uint32_t result;
  295. __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  296. return(result);
  297. }
  298. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
  299. {
  300. uint32_t result;
  301. __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  302. return(result);
  303. }
  304. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
  305. {
  306. uint32_t result;
  307. __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  308. return(result);
  309. }
  310. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
  311. {
  312. uint32_t result;
  313. __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  314. return(result);
  315. }
  316. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
  317. {
  318. uint32_t result;
  319. __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  320. return(result);
  321. }
  322. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
  323. {
  324. uint32_t result;
  325. __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  326. return(result);
  327. }
  328. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
  329. {
  330. uint32_t result;
  331. __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  332. return(result);
  333. }
  334. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
  335. {
  336. uint32_t result;
  337. __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  338. return(result);
  339. }
  340. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
  341. {
  342. uint32_t result;
  343. __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  344. return(result);
  345. }
  346. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
  347. {
  348. uint32_t result;
  349. __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  350. return(result);
  351. }
  352. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
  353. {
  354. uint32_t result;
  355. __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  356. return(result);
  357. }
  358. #define __SSAT16(ARG1,ARG2) \
  359. ({ \
  360. uint32_t __RES, __ARG1 = (ARG1); \
  361. __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  362. __RES; \
  363. })
  364. #define __USAT16(ARG1,ARG2) \
  365. ({ \
  366. uint32_t __RES, __ARG1 = (ARG1); \
  367. __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  368. __RES; \
  369. })
  370. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
  371. {
  372. uint32_t result;
  373. __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  374. return(result);
  375. }
  376. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
  377. {
  378. uint32_t result;
  379. __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  380. return(result);
  381. }
  382. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
  383. {
  384. uint32_t result;
  385. __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  386. return(result);
  387. }
  388. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
  389. {
  390. uint32_t result;
  391. __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  392. return(result);
  393. }
  394. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
  395. {
  396. uint32_t result;
  397. __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  398. return(result);
  399. }
  400. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
  401. {
  402. uint32_t result;
  403. __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  404. return(result);
  405. }
  406. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
  407. {
  408. uint32_t result;
  409. __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  410. return(result);
  411. }
  412. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
  413. {
  414. uint32_t result;
  415. __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  416. return(result);
  417. }
  418. #define __SMLALD(ARG1,ARG2,ARG3) \
  419. ({ \
  420. uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  421. __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  422. (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
  423. })
  424. #define __SMLALDX(ARG1,ARG2,ARG3) \
  425. ({ \
  426. uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  427. __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  428. (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
  429. })
  430. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
  431. {
  432. uint32_t result;
  433. __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  434. return(result);
  435. }
  436. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
  437. {
  438. uint32_t result;
  439. __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  440. return(result);
  441. }
  442. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
  443. {
  444. uint32_t result;
  445. __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  446. return(result);
  447. }
  448. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
  449. {
  450. uint32_t result;
  451. __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  452. return(result);
  453. }
  454. #define __SMLSLD(ARG1,ARG2,ARG3) \
  455. ({ \
  456. uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((ARG3) >> 32), __ARG3_L = (uint32_t)((ARG3) & 0xFFFFFFFFUL); \
  457. __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  458. (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
  459. })
  460. #define __SMLSLDX(ARG1,ARG2,ARG3) \
  461. ({ \
  462. uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((ARG3) >> 32), __ARG3_L = (uint32_t)((ARG3) & 0xFFFFFFFFUL); \
  463. __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  464. (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
  465. })
  466. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
  467. {
  468. uint32_t result;
  469. __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  470. return(result);
  471. }
  472. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
  473. {
  474. uint32_t result;
  475. __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  476. return(result);
  477. }
  478. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
  479. {
  480. uint32_t result;
  481. __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  482. return(result);
  483. }
  484. #define __PKHBT(ARG1,ARG2,ARG3) \
  485. ({ \
  486. uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  487. __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  488. __RES; \
  489. })
  490. #define __PKHTB(ARG1,ARG2,ARG3) \
  491. ({ \
  492. uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  493. if (ARG3 == 0) \
  494. __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
  495. else \
  496. __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  497. __RES; \
  498. })
  499. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
  500. {
  501. int32_t result;
  502. __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  503. return(result);
  504. }
  505. /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
  506. #elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
  507. /* TASKING carm specific functions */
  508. /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
  509. /* not yet supported */
  510. /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
  511. #endif
  512. /*@} end of group CMSIS_SIMD_intrinsics */
  513. #endif /* __CORE_CM4_SIMD_H */
  514. #ifdef __cplusplus
  515. }
  516. #endif