#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <linux/math.h>
#include <vdso/math64.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 *
 * Return: sets *remainder, then returns dividend / divisor
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
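
/*
 * Illustrative sketch, not part of this header: a common pattern is
 * splitting a nanosecond count into whole seconds plus the leftover
 * nanoseconds, where NSEC_PER_SEC (1000000000) is assumed to be defined
 * elsewhere in the kernel headers:
 *
 *	u32 rem_ns;
 *	u64 secs = div_u64_rem(ns, NSEC_PER_SEC, &rem_ns);
 */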

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 *
 * Return: sets *remainder, then returns dividend / divisor
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 *
 * Return: sets *remainder, then returns dividend / divisor
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 *
 * Return: dividend / divisor
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Return: dividend / divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

#ifndef mul_u32_u32
/*
 * 32bit * 32bit -> 64bit multiply.  Architectures may override this with
 * a version that avoids generating a full 64x64 multiply.
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	/* Combine the shifted 32x32 partial products of the low and high halves. */
	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif
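
/*
 * Illustrative sketch, not part of this header: mul_u64_u32_shr() computes
 * (a * mul) >> shift without losing the high bits of the intermediate
 * product.  A typical use is fixed-point scaling of a counter value, where
 * "mult" and "shift" below are assumed to be precomputed scaling factors:
 *
 *	u64 ns = mul_u64_u32_shr(cycles, mult, shift);
 *
 * Worked value: mul_u64_u32_shr(1ULL << 32, 5, 32) == 5.
 */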

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95.  The low 32 bits go into the result of the
	 * multiplication, the high 32 bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll;
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif
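
/*
 * Illustrative sketch, not part of this header: the fallback above is the
 * schoolbook decomposition of a 64x64 -> 128 bit multiply into four
 * 32x32 -> 64 bit partial products:
 *
 *	a * b = ((ah * bh) << 64) + ((ah * bl + al * bh) << 32) + (al * bl)
 *
 * where ah/al and bh/bl are the high and low 32-bit halves of a and b.
 * The 128-bit result is then shifted right by "shift" and truncated to
 * 64 bits.
 */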

#endif

#ifndef mul_s64_u64_shr
static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift)
{
	u64 ret;

	/*
	 * Extract the sign before the multiplication and put it back
	 * afterwards if necessary.
	 */
	ret = mul_u64_u64_shr(abs(a), b, shift);

	if (a < 0)
		ret = -((s64) ret);

	return ret;
}
#endif

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = mul_u32_u32(u.l.low, mul);
	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif
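
/*
 * Illustrative sketch, not part of this header: mul_u64_u32_div() evaluates
 * a * mul / divisor using a 96-bit intermediate product, on the assumption
 * that the final quotient fits in 64 bits.
 *
 * Worked value: mul_u64_u32_div(1ULL << 32, 3, 2) == 0x180000000ULL,
 * i.e. (3 << 32) / 2.
 */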

u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);

/**
 * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
 * @ll: unsigned 64bit dividend
 * @d: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round up.
 *
 * Return: dividend / divisor rounded up
 */
#define DIV64_U64_ROUND_UP(ll, d)	\
	({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
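
/*
 * Illustrative values, not part of this header:
 * DIV64_U64_ROUND_UP(10, 3) == 4 and DIV64_U64_ROUND_UP(12, 3) == 4,
 * i.e. exact quotients are unchanged and anything else rounds up.
 */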

/**
 * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV64_U64_ROUND_CLOSEST(dividend, divisor)	\
	({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })
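
/*
 * Illustrative values, not part of this header:
 * DIV64_U64_ROUND_CLOSEST(7, 2) == 4 (3.5 rounds up) and
 * DIV64_U64_ROUND_CLOSEST(9, 4) == 2 (2.25 rounds down).
 */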

/**
 * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Divide signed 64bit dividend by signed 32bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV_S64_ROUND_CLOSEST(dividend, divisor)(	\
{							\
	s64 __x = (dividend);				\
	s32 __d = (divisor);				\
	((__x > 0) == (__d > 0)) ?			\
		div_s64((__x + (__d / 2)), __d) :	\
		div_s64((__x - (__d / 2)), __d);	\
}							\
)
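
/*
 * Illustrative values, not part of this header:
 * DIV_S64_ROUND_CLOSEST(7, 2) == 4 and DIV_S64_ROUND_CLOSEST(-7, 2) == -4,
 * i.e. halves round away from zero for both signs.
 */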

#endif	/* _LINUX_MATH64_H */