#endif
}

// computes limit(val, 2**15), saturating val to the int16_t range
static inline int16_t saturate16(int32_t val) __attribute__((always_inline, unused));
static inline int16_t saturate16(int32_t val)
{
#if defined(KINETISK)
	int16_t out;
	int32_t tmp;
	asm volatile("ssat %0, %1, %2" : "=r" (tmp) : "I" (16), "r" (val) );
	out = (int16_t) tmp; // ssat #16 already confines tmp to [-32768, 32767]; no mask needed
	return out;
#elif defined(KINETISL)
	// portable C fallback; Cortex-M0+ has no ssat instruction
	if (val > 32767) return 32767;
	if (val < -32768) return -32768;
	return (int16_t)val;
#endif
}
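
// Example (illustrative values, not from the original source):
//   saturate16(100000)  == 32767    (clamped to INT16_MAX)
//   saturate16(-100000) == -32768   (clamped to INT16_MIN)
//   saturate16(1234)    == 1234     (already in range, unchanged)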

// computes ((a[31:0] * b[15:0]) >> 16)
static inline int32_t signed_multiply_32x16b(int32_t a, uint32_t b) __attribute__((always_inline, unused));
static inline int32_t signed_multiply_32x16b(int32_t a, uint32_t b)
{
#if defined(KINETISK)
	int32_t out;
	// smulwb: multiply a (word) by the signed low halfword of b,
	// keeping the top 32 bits of the 48-bit product
	asm volatile("smulwb %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
#elif defined(KINETISL)
	// portable C fallback: multiply by the (signed) low halfword of b
	return (int32_t)(((int64_t)a * (int16_t)(b & 0xFFFF)) >> 16);
#endif
}
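
// Example (illustrative): the low 16 bits of b act as a signed gain of b/65536:
//   signed_multiply_32x16b(400, 0x4000) == 100   (gain 16384/65536 == 0.25)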

// computes ((a[31:0] * b[31:0]) >> 32)
static inline int32_t multiply_32x32_rshift32(int32_t a, int32_t b) __attribute__((always_inline, unused));
static inline int32_t multiply_32x32_rshift32(int32_t a, int32_t b)
{
#if defined(KINETISK)
	int32_t out;
	asm volatile("smmul %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
#elif defined(KINETISL)
	// portable C fallback: take the high word of the 64-bit signed product
	return (int32_t)(((int64_t)a * (int64_t)b) >> 32);
#endif
}
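
// Example (illustrative, Q31 operands): 0x40000000 is 0.5 in Q31;
//   multiply_32x32_rshift32(0x40000000, 0x40000000) == 0x10000000
// (the shift is >> 32 rather than >> 31, so the result is half the Q31 product)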

// computes (((int64_t)a[31:0] * (int64_t)b[31:0] + 0x80000000) >> 32)
static inline int32_t multiply_32x32_rshift32_rounded(int32_t a, int32_t b) __attribute__((always_inline, unused));
static inline int32_t multiply_32x32_rshift32_rounded(int32_t a, int32_t b)
{
#if defined(KINETISK)
	int32_t out;
	asm volatile("smmulr %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
#elif defined(KINETISL)
	// portable C fallback: round to nearest before taking the high word
	return (int32_t)(((int64_t)a * (int64_t)b + 0x80000000LL) >> 32);
#endif
}
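
// Example (illustrative): 0x40000000 * 3 == 0.75 * 2^32, which rounds up:
//   multiply_32x32_rshift32_rounded(0x40000000, 3) == 1
//   multiply_32x32_rshift32(0x40000000, 3)         == 0 (truncated)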

// computes sum + (((int64_t)a[31:0] * (int64_t)b[31:0] + 0x80000000) >> 32)
static inline int32_t multiply_accumulate_32x32_rshift32_rounded(int32_t sum, int32_t a, int32_t b) __attribute__((always_inline, unused));
static inline int32_t multiply_accumulate_32x32_rshift32_rounded(int32_t sum, int32_t a, int32_t b)
{
#if defined(KINETISK)
	int32_t out;
	asm volatile("smmlar %0, %2, %3, %1" : "=r" (out) : "r" (sum), "r" (a), "r" (b));
	return out;
#elif defined(KINETISL)
	// portable C fallback: rounded high-word multiply, accumulated into sum
	return sum + (int32_t)(((int64_t)a * (int64_t)b + 0x80000000LL) >> 32);
#endif
}
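
// Example (illustrative), continuing the case above:
//   multiply_accumulate_32x32_rshift32_rounded(10, 0x40000000, 3) == 11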

// computes sum - (((int64_t)a[31:0] * (int64_t)b[31:0] + 0x80000000) >> 32)
static inline int32_t multiply_subtract_32x32_rshift32_rounded(int32_t sum, int32_t a, int32_t b) __attribute__((always_inline, unused));
static inline int32_t multiply_subtract_32x32_rshift32_rounded(int32_t sum, int32_t a, int32_t b)
{
#if defined(KINETISK)
	int32_t out;
	asm volatile("smmlsr %0, %2, %3, %1" : "=r" (out) : "r" (sum), "r" (a), "r" (b));
	return out;
#elif defined(KINETISL)
	// portable C fallback: rounded high-word multiply, subtracted from sum
	return sum - (int32_t)(((int64_t)a * (int64_t)b + 0x80000000LL) >> 32);
#endif
}
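
// Example (illustrative):
//   multiply_subtract_32x32_rshift32_rounded(10, 0x40000000, 3) == 9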

// computes (((a[31:16] + b[31:16]) << 16) | (a[15:0] + b[15:0])) (saturates)
static inline uint32_t signed_add_16_and_16(uint32_t a, uint32_t b) __attribute__((always_inline, unused));
static inline uint32_t signed_add_16_and_16(uint32_t a, uint32_t b)
{
	int32_t out;
	asm volatile("qadd16 %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
}
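
// Example (illustrative): each 16-bit lane adds and saturates independently:
//   signed_add_16_and_16(0x7FFF0001, 0x00010001) == 0x7FFF0002
// (the upper lane clamps at 0x7FFF instead of wrapping to 0x8000)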

// computes (((a[31:16] - b[31:16]) << 16) | (a[15:0] - b[15:0])) (saturates)
static inline int32_t signed_subtract_16_and_16(int32_t a, int32_t b) __attribute__((always_inline, unused));
static inline int32_t signed_subtract_16_and_16(int32_t a, int32_t b)
{
	int32_t out;
	asm volatile("qsub16 %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
}
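
// Example (illustrative):
//   signed_subtract_16_and_16(0x80000005, 0x00010002) == 0x80000003
// (the upper lane clamps at 0x8000, the most negative int16_t)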

// computes out = (((a[31:16] + b[31:16]) / 2) << 16) | ((a[15:0] + b[15:0]) / 2)
static inline int32_t signed_halving_add_16_and_16(int32_t a, int32_t b) __attribute__((always_inline, unused));
static inline int32_t signed_halving_add_16_and_16(int32_t a, int32_t b)
{
	int32_t out;
	asm volatile("shadd16 %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
}
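
// Example (illustrative): averages each lane; the 17-bit intermediate cannot overflow:
//   signed_halving_add_16_and_16(0x7FFF0004, 0x7FFF0002) == 0x7FFF0003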

// computes out = (((a[31:16] - b[31:16]) / 2) << 16) | ((a[15:0] - b[15:0]) / 2)
static inline int32_t signed_halving_subtract_16_and_16(int32_t a, int32_t b) __attribute__((always_inline, unused));
static inline int32_t signed_halving_subtract_16_and_16(int32_t a, int32_t b)
{
	int32_t out;
	asm volatile("shsub16 %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
}
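
// Example (illustrative):
//   signed_halving_subtract_16_and_16(0x00040008, 0x00020002) == 0x00010003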

// computes (sum + ((a[31:0] * b[15:0]) >> 16))
static inline int32_t signed_multiply_accumulate_32x16b(int32_t sum, int32_t a, uint32_t b) __attribute__((always_inline, unused));
static inline int32_t signed_multiply_accumulate_32x16b(int32_t sum, int32_t a, uint32_t b)