@@ -1,6 +1,6 @@
#include "Audio.h"
#include "arm_math.h"
#include "utility/dspinst.h"
@@ -48,15 +48,6 @@ static void copy_to_fft_buffer(void *destination, const void *source)
	}
}

// computes (val >> rshift), saturated to the range of a signed integer with 'bits' bits
static inline int32_t signed_saturate_rshift(int32_t val, int bits, int rshift) __attribute__((always_inline));
static inline int32_t signed_saturate_rshift(int32_t val, int bits, int rshift)
{
	int32_t out;
	asm volatile("ssat %0, %1, %2, asr %3" : "=r" (out) : "I" (bits), "r" (val), "I" (rshift));
	return out;
}

static void apply_window_to_fft_buffer(void *buffer, const void *window)
{
	int16_t *buf = (int16_t *)buffer;
@@ -1149,116 +1140,9 @@ void AudioPlayMemory::update(void)

/******************************************************************/

// computes ((a[31:0] * b[15:0]) >> 16)
static inline int32_t signed_multiply_32x16b(int32_t a, uint32_t b) __attribute__((always_inline));
static inline int32_t signed_multiply_32x16b(int32_t a, uint32_t b)
{
	int32_t out;
	asm volatile("smulwb %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
}

// computes ((a[31:0] * b[31:16]) >> 16)
static inline int32_t signed_multiply_32x16t(int32_t a, uint32_t b) __attribute__((always_inline));
static inline int32_t signed_multiply_32x16t(int32_t a, uint32_t b)
{
	int32_t out;
	asm volatile("smulwt %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
}

// computes (((int64_t)a[31:0] * (int64_t)b[31:0]) >> 32)
static inline int32_t multiply_32x32_rshift32(int32_t a, int32_t b) __attribute__((always_inline));
static inline int32_t multiply_32x32_rshift32(int32_t a, int32_t b)
{
	int32_t out;
	asm volatile("smmul %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
}

// computes (((int64_t)a[31:0] * (int64_t)b[31:0] + 0x80000000) >> 32)
static inline int32_t multiply_32x32_rshift32_rounded(int32_t a, int32_t b) __attribute__((always_inline));
static inline int32_t multiply_32x32_rshift32_rounded(int32_t a, int32_t b)
{
	int32_t out;
	asm volatile("smmulr %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
}

// computes sum + (((int64_t)a[31:0] * (int64_t)b[31:0] + 0x80000000) >> 32)
static inline int32_t multiply_accumulate_32x32_rshift32_rounded(int32_t sum, int32_t a, int32_t b) __attribute__((always_inline));
static inline int32_t multiply_accumulate_32x32_rshift32_rounded(int32_t sum, int32_t a, int32_t b)
{
	int32_t out;
	asm volatile("smmlar %0, %2, %3, %1" : "=r" (out) : "r" (sum), "r" (a), "r" (b));
	return out;
}

// computes sum - (((int64_t)a[31:0] * (int64_t)b[31:0] + 0x80000000) >> 32)
static inline int32_t multiply_subtract_32x32_rshift32_rounded(int32_t sum, int32_t a, int32_t b) __attribute__((always_inline));
static inline int32_t multiply_subtract_32x32_rshift32_rounded(int32_t sum, int32_t a, int32_t b)
{
	int32_t out;
	asm volatile("smmlsr %0, %2, %3, %1" : "=r" (out) : "r" (sum), "r" (a), "r" (b));
	return out;
}

// computes ((a[15:0] << 16) | b[15:0])
static inline uint32_t pack_16x16(int32_t a, int32_t b) __attribute__((always_inline));
static inline uint32_t pack_16x16(int32_t a, int32_t b)
{
	int32_t out;
	asm volatile("pkhbt %0, %1, %2, lsl #16" : "=r" (out) : "r" (b), "r" (a));
	return out;
}

// computes (((a[31:16] + b[31:16]) << 16) | (a[15:0] + b[15:0])), with signed saturation of each 16-bit half
static inline uint32_t signed_add_16_and_16(uint32_t a, uint32_t b) __attribute__((always_inline));
static inline uint32_t signed_add_16_and_16(uint32_t a, uint32_t b)
{
	int32_t out;
	asm volatile("qadd16 %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
}

// computes (sum + ((a[31:0] * b[15:0]) >> 16))
static inline int32_t signed_multiply_accumulate_32x16b(int32_t sum, int32_t a, uint32_t b)
{
	int32_t out;
	asm volatile("smlawb %0, %2, %3, %1" : "=r" (out) : "r" (sum), "r" (a), "r" (b));
	return out;
}

// computes (sum + ((a[31:0] * b[31:16]) >> 16))
static inline int32_t signed_multiply_accumulate_32x16t(int32_t sum, int32_t a, uint32_t b)
{
	int32_t out;
	asm volatile("smlawt %0, %2, %3, %1" : "=r" (out) : "r" (sum), "r" (a), "r" (b));
	return out;
}

// computes a logical AND; forces the compiler to allocate a register and use a single-cycle instruction
static inline uint32_t logical_and(uint32_t a, uint32_t b)
{
	asm volatile("and %0, %1" : "+r" (a) : "r" (b));
	return a;
}

void applyGain(int16_t *data, int32_t mult)
{
	uint32_t *p = (uint32_t *)data;
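The hunk above ends mid-function, so the body of applyGain is not shown here. For reference only, the sketch below (not taken from the diff) shows how a gain routine of this shape could combine the helpers removed above, processing two packed 16-bit samples per 32-bit word; the len parameter and the reading of mult as a gain scaled by 65536 are assumptions made for the example.

// Hypothetical sketch: scale 'len' 16-bit samples (len assumed even) by a gain
// 'mult' expressed as gain * 65536, saturating each result back to 16 bits.
static void apply_gain_sketch(int16_t *data, int32_t mult, unsigned int len)
{
	uint32_t *p = (uint32_t *)data;
	uint32_t *end = p + len / 2;

	while (p < end) {
		uint32_t in = *p;                               // two packed samples
		int32_t lo = signed_multiply_32x16b(mult, in);  // (mult * in[15:0])  >> 16
		int32_t hi = signed_multiply_32x16t(mult, in);  // (mult * in[31:16]) >> 16
		lo = signed_saturate_rshift(lo, 16, 0);         // clamp to int16_t range
		hi = signed_saturate_rshift(hi, 16, 0);
		*p++ = pack_16x16(hi, lo);                      // repack, high sample on top
	}
}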
@@ -1,17 +0,0 @@
#! /usr/bin/perl

use Math::Trig ':pi';

$len = 256;
print "#define SINE_TABLE_LEN $len\n";
print "static const int16_t sine_table[] = {\n";
for ($i=0; $i <= $len; $i++) {
	$f = sin($i / $len * 2 * pi);
	$d = sprintf "%.0f", $f * 32767.0;
	#print $d;
	printf "%6d", $d + 0;
	print "," if ($i < $len);
	print "\n" if ($i % 10) == 9;
}
print "\n" unless ($len % 10) == 9;
print "};\n";
@@ -0,0 +1,111 @@
#include <stdint.h>

// computes (val >> rshift), saturated to the range of a signed integer with 'bits' bits
static inline int32_t signed_saturate_rshift(int32_t val, int bits, int rshift) __attribute__((always_inline, unused));
static inline int32_t signed_saturate_rshift(int32_t val, int bits, int rshift)
{
	int32_t out;
	asm volatile("ssat %0, %1, %2, asr %3" : "=r" (out) : "I" (bits), "r" (val), "I" (rshift));
	return out;
}

// computes ((a[31:0] * b[15:0]) >> 16)
static inline int32_t signed_multiply_32x16b(int32_t a, uint32_t b) __attribute__((always_inline, unused));
static inline int32_t signed_multiply_32x16b(int32_t a, uint32_t b)
{
	int32_t out;
	asm volatile("smulwb %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
}

// computes ((a[31:0] * b[31:16]) >> 16)
static inline int32_t signed_multiply_32x16t(int32_t a, uint32_t b) __attribute__((always_inline, unused));
static inline int32_t signed_multiply_32x16t(int32_t a, uint32_t b)
{
	int32_t out;
	asm volatile("smulwt %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
}

// computes (((int64_t)a[31:0] * (int64_t)b[31:0]) >> 32)
static inline int32_t multiply_32x32_rshift32(int32_t a, int32_t b) __attribute__((always_inline, unused));
static inline int32_t multiply_32x32_rshift32(int32_t a, int32_t b)
{
	int32_t out;
	asm volatile("smmul %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
}

// computes (((int64_t)a[31:0] * (int64_t)b[31:0] + 0x80000000) >> 32)
static inline int32_t multiply_32x32_rshift32_rounded(int32_t a, int32_t b) __attribute__((always_inline, unused));
static inline int32_t multiply_32x32_rshift32_rounded(int32_t a, int32_t b)
{
	int32_t out;
	asm volatile("smmulr %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
}

// computes sum + (((int64_t)a[31:0] * (int64_t)b[31:0] + 0x80000000) >> 32)
static inline int32_t multiply_accumulate_32x32_rshift32_rounded(int32_t sum, int32_t a, int32_t b) __attribute__((always_inline, unused));
static inline int32_t multiply_accumulate_32x32_rshift32_rounded(int32_t sum, int32_t a, int32_t b)
{
	int32_t out;
	asm volatile("smmlar %0, %2, %3, %1" : "=r" (out) : "r" (sum), "r" (a), "r" (b));
	return out;
}

// computes sum - (((int64_t)a[31:0] * (int64_t)b[31:0] + 0x80000000) >> 32)
static inline int32_t multiply_subtract_32x32_rshift32_rounded(int32_t sum, int32_t a, int32_t b) __attribute__((always_inline, unused));
static inline int32_t multiply_subtract_32x32_rshift32_rounded(int32_t sum, int32_t a, int32_t b)
{
	int32_t out;
	asm volatile("smmlsr %0, %2, %3, %1" : "=r" (out) : "r" (sum), "r" (a), "r" (b));
	return out;
}

// computes ((a[15:0] << 16) | b[15:0])
static inline uint32_t pack_16x16(int32_t a, int32_t b) __attribute__((always_inline, unused));
static inline uint32_t pack_16x16(int32_t a, int32_t b)
{
	int32_t out;
	asm volatile("pkhbt %0, %1, %2, lsl #16" : "=r" (out) : "r" (b), "r" (a));
	return out;
}

// computes (((a[31:16] + b[31:16]) << 16) | (a[15:0] + b[15:0])), with signed saturation of each 16-bit half
static inline uint32_t signed_add_16_and_16(uint32_t a, uint32_t b) __attribute__((always_inline, unused));
static inline uint32_t signed_add_16_and_16(uint32_t a, uint32_t b)
{
	int32_t out;
	asm volatile("qadd16 %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
	return out;
}

// computes (sum + ((a[31:0] * b[15:0]) >> 16))
static inline int32_t signed_multiply_accumulate_32x16b(int32_t sum, int32_t a, uint32_t b) __attribute__((always_inline, unused));
static inline int32_t signed_multiply_accumulate_32x16b(int32_t sum, int32_t a, uint32_t b)
{
	int32_t out;
	asm volatile("smlawb %0, %2, %3, %1" : "=r" (out) : "r" (sum), "r" (a), "r" (b));
	return out;
}

// computes (sum + ((a[31:0] * b[31:16]) >> 16))
static inline int32_t signed_multiply_accumulate_32x16t(int32_t sum, int32_t a, uint32_t b) __attribute__((always_inline, unused));
static inline int32_t signed_multiply_accumulate_32x16t(int32_t sum, int32_t a, uint32_t b)
{
	int32_t out;
	asm volatile("smlawt %0, %2, %3, %1" : "=r" (out) : "r" (sum), "r" (a), "r" (b));
	return out;
}

// computes a logical AND; forces the compiler to allocate a register and use a single-cycle instruction
static inline uint32_t logical_and(uint32_t a, uint32_t b) __attribute__((always_inline, unused));
static inline uint32_t logical_and(uint32_t a, uint32_t b)
{
	asm volatile("and %0, %1" : "+r" (a) : "r" (b));
	return a;
}
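As a usage note (not part of the new header), the rounded 32x32 helpers compose naturally into fixed-point weighted sums. The sketch below, with the function name and the fraction-of-2^32 gain scaling chosen purely for illustration, mixes two 32-bit samples:

// Hypothetical example: out = round(a*ka / 2^32) + round(b*kb / 2^32),
// i.e. each gain is a fraction of 2^32 (ka = 0x40000000 is a gain of 0.25).
static int32_t mix2_sketch(int32_t a, int32_t ka, int32_t b, int32_t kb)
{
	int32_t acc = multiply_32x32_rshift32_rounded(a, ka);            // first weighted term
	return multiply_accumulate_32x32_rshift32_rounded(acc, b, kb);   // add second weighted term
}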