/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __VDSO_MATH64_H
#define __VDSO_MATH64_H

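/*
 * __iter_div_u64_rem - u64-by-u32 division by repeated subtraction
 * @dividend: 64-bit value to divide
 * @divisor: 32-bit value to divide by
 * @remainder: location for dividend % divisor
 *
 * Runs in O(quotient) time, so it is only suitable when the quotient is
 * known to be small. Returns the quotient as a u32.
 */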
static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation.
		 */
		asm("" : "+rm" (dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_add_u64_shr
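/*
 * Compute (a * mul + b) >> shift, using the compiler's 128-bit type so the
 * intermediate product and sum cannot overflow.
 */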
static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
}
#endif /* mul_u64_u32_add_u64_shr */

#else

#ifndef mul_u64_u32_add_u64_shr
#ifndef mul_u32_u32
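/* Fallback 32x32 -> 64 bit multiply, used unless the arch provides its own. */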
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#define mul_u32_u32 mul_u32_u32
#endif
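/*
 * Fallback for architectures without 128-bit arithmetic: split a into 32-bit
 * halves so that (a * mul + b) >> shift can be computed with 64-bit
 * operations. Only valid for shifts up to 32, since the high-word term is
 * shifted left by (32 - shift).
 */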
static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
	u32 ah = a >> 32, al = a;
	bool ovf;
	u64 ret;

	/* Low half: al * mul + b may carry out of 64 bits. */
	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
	ret >>= shift;
	/* Fold the carry (2^64) back in, scaled by the shift. */
	if (ovf && shift)
		ret += 1ULL << (64 - shift);
	/* High half contributes ah * mul * 2^32, i.e. << (32 - shift). */
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_add_u64_shr */

#endif

#endif /* __VDSO_MATH64_H */