@@ -100,7 +100,7 @@ static inline int32_t multiply_32x32_rshift32(int32_t a, int32_t b)
 	asm volatile("smmul %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
 	return out;
 #elif defined(KINETISL)
-	return 0; // TODO....
+	return ((int64_t)a * (int64_t)b) >> 32;
 #endif
 }
@@ -113,7 +113,7 @@ static inline int32_t multiply_32x32_rshift32_rounded(int32_t a, int32_t b)
 	asm volatile("smmulr %0, %1, %2" : "=r" (out) : "r" (a), "r" (b));
 	return out;
 #elif defined(KINETISL)
-	return 0; // TODO....
+	return (((int64_t)a * (int64_t)b) + 0x80000000) >> 32;
 #endif
 }
@@ -126,7 +126,7 @@ static inline int32_t multiply_accumulate_32x32_rshift32_rounded(int32_t sum, in
 	asm volatile("smmlar %0, %2, %3, %1" : "=r" (out) : "r" (sum), "r" (a), "r" (b));
 	return out;
 #elif defined(KINETISL)
-	return 0; // TODO....
+	return sum + ((((int64_t)a * (int64_t)b) + 0x80000000) >> 32);
 #endif
 }
@@ -139,7 +139,7 @@ static inline int32_t multiply_subtract_32x32_rshift32_rounded(int32_t sum, int3
 	asm volatile("smmlsr %0, %2, %3, %1" : "=r" (out) : "r" (sum), "r" (a), "r" (b));
 	return out;
 #elif defined(KINETISL)
-	return 0; // TODO....
+	return sum - ((((int64_t)a * (int64_t)b) + 0x80000000) >> 32);
 #endif
 }
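For reference, here is a minimal host-side sketch of what these KINETISL fallbacks compute: the Cortex-M0+ has no SMMUL/SMMULR, so the 32x32 product is widened to 64 bits and the upper word is kept, and the rounded variants add 0x80000000 (half of the discarded low word, matching SMMULR's rounding) before the shift. The `ref_*` helpers and the values in `main` are hypothetical test code, not part of the library:

```c
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Hypothetical reference helpers mirroring the KINETISL fallbacks above. */
static inline int32_t ref_rshift32(int32_t a, int32_t b)
{
	/* full 64-bit product, keep the upper 32 bits (like SMMUL) */
	return (int32_t)(((int64_t)a * (int64_t)b) >> 32);
}

static inline int32_t ref_rshift32_rounded(int32_t a, int32_t b)
{
	/* add half of the discarded low word before shifting (like SMMULR) */
	return (int32_t)((((int64_t)a * (int64_t)b) + 0x80000000LL) >> 32);
}

int main(void)
{
	/* 0.5 * 0.5 in Q31 -> 0x10000000 (0.25) in the upper word */
	printf("%08" PRIx32 "\n", (uint32_t)ref_rshift32(0x40000000, 0x40000000));
	/* truncation vs. rounding: 1 * INT32_MIN sets only the low word (negative) */
	printf("%08" PRIx32 "\n", (uint32_t)ref_rshift32(1, INT32_MIN));         /* ffffffff */
	printf("%08" PRIx32 "\n", (uint32_t)ref_rshift32_rounded(1, INT32_MIN)); /* 00000000 */
	return 0;
}
```

Note that `>> 32` on a negative int64_t is technically implementation-defined in C, but GCC (which the Teensy toolchain uses) defines it as an arithmetic, sign-extending shift, which is what these fallbacks rely on.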