1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#ifndef _LINUX_ETHERDEVICE_H
18#define _LINUX_ETHERDEVICE_H
19
20#include <linux/if_ether.h>
21#include <linux/netdevice.h>
22#include <linux/random.h>
23#include <linux/crc32.h>
24#include <asm/unaligned.h>
25#include <asm/bitsperlong.h>
26
#ifdef __KERNEL__
struct device;
/* MAC address acquisition helpers (platform data / firmware / nvmem cell). */
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
unsigned char *arch_get_platform_mac_address(void);
int nvmem_get_mac_address(struct device *dev, void *addrbuf);
/* Generic Ethernet RX header parsing used by drivers and the core stack. */
u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;

/* Generic Ethernet header_ops implementations. */
int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
	       const void *daddr, const void *saddr, unsigned len);
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
		     __be16 type);
void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
			     const unsigned char *haddr);
__be16 eth_header_parse_protocol(const struct sk_buff *skb);
/* MAC address change helpers (validate, then commit). */
int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
void eth_commit_mac_addr_change(struct net_device *dev, void *p);
int eth_mac_addr(struct net_device *dev, void *p);
int eth_validate_addr(struct net_device *dev);

/* Allocate an Ethernet net_device with @txqs TX and @rxqs RX queues. */
struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
				      unsigned int rxqs);
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)

/* Device-managed (devm_*) variant tied to @dev's lifetime. */
struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
					   unsigned int txqs,
					   unsigned int rxqs);
#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)

/* GRO receive/complete hooks for the Ethernet protocol. */
struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
int eth_gro_complete(struct sk_buff *skb, int nhoff);
61

/* Reserved Ethernet Addresses per IEEE 802.1Q (01:80:c2:00:00:0x block) */
static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
#define eth_stp_addr eth_reserved_addr_base
66
/**
 * is_link_local_ether_addr - Determine if given Ethernet address is link-local
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
 * IEEE 802.1Q 8.6.3 Frame filtering.
 *
 * Please note: addr must be aligned to u16.
 */
static inline bool is_link_local_ether_addr(const u8 *addr)
{
	__be16 *a = (__be16 *)addr;
	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
	/* mask keeps the upper 44 bits, ignoring the final low nibble */
	static const __be16 m = cpu_to_be16(0xfff0);

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* one u32 load for the first 4 bytes, u16 compare for the rest */
	return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
		(__force int)((a[2] ^ b[2]) & m)) == 0;
#else
	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
#endif
}
89
/**
 * is_zero_ether_addr - Determine if give Ethernet address is all zeros.
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is all zeroes.
 *
 * Please note: addr must be aligned to u16.
 */
static inline bool is_zero_ether_addr(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* cheap unaligned access: fold all 6 bytes with one u32 + one u16 */
	return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0;
#else
	return (*(const u16 *)(addr + 0) |
		*(const u16 *)(addr + 2) |
		*(const u16 *)(addr + 4)) == 0;
#endif
}
108
/**
 * is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is a multicast address.
 * By definition the broadcast address is also a multicast address.
 */
static inline bool is_multicast_ether_addr(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	u32 a = *(const u32 *)addr;
#else
	u16 a = *(const u16 *)addr;
#endif
#ifdef __BIG_ENDIAN
	/* big endian: the first octet (carrying the I/G bit) is the MSB of a */
	return 0x01 & (a >> ((sizeof(a) * 8) - 8));
#else
	/* little endian: the first octet is the LSB of a */
	return 0x01 & a;
#endif
}
129
/*
 * Like is_multicast_ether_addr(), but requires at least two bytes of valid
 * (readable) padding after the 6-byte address so a single u64 load can be
 * used on 64-bit platforms with efficient unaligned access.
 */
static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#ifdef __BIG_ENDIAN
	/* first octet is the top byte of the u64 */
	return 0x01 & ((*(const u64 *)addr) >> 56);
#else
	/* first octet is the bottom byte of the u64 */
	return 0x01 & (*(const u64 *)addr);
#endif
#else
	return is_multicast_ether_addr(addr);
#endif
}
142
143
144
145
146
147
148
149static inline bool is_local_ether_addr(const u8 *addr)
150{
151 return 0x02 & addr[0];
152}
153
/**
 * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Return true if the address is the broadcast address
 * (ff:ff:ff:ff:ff:ff).
 *
 * Please note: addr must be aligned to u16.
 */
static inline bool is_broadcast_ether_addr(const u8 *addr)
{
	/* AND the three half-words: all-ones only if every byte is 0xff */
	return (*(const u16 *)(addr + 0) &
		*(const u16 *)(addr + 2) &
		*(const u16 *)(addr + 4)) == 0xffff;
}
168
169
170
171
172
173
174
175static inline bool is_unicast_ether_addr(const u8 *addr)
176{
177 return !is_multicast_ether_addr(addr);
178}
179
180
181
182
183
184
185
186
187
188
189
190
191static inline bool is_valid_ether_addr(const u8 *addr)
192{
193
194
195 return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
196}
197
/**
 * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
 * @proto: Ethertype/length value to be tested
 *
 * Check that the value from the Ethertype/length field is a valid Ethertype.
 *
 * Return true if the valid is an 802.3 supported Ethertype.
 */
static inline bool eth_proto_is_802_3(__be16 proto)
{
#ifndef __BIG_ENDIAN
	/* if CPU is little endian mask off bits representing LSB */
	proto &= htons(0xFF00);
#endif
	/* cast both to u16 and compare since LSB can be ignored */
	return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
}
215
216
217
218
219
220
221
222
223static inline void eth_random_addr(u8 *addr)
224{
225 get_random_bytes(addr, ETH_ALEN);
226 addr[0] &= 0xfe;
227 addr[0] |= 0x02;
228}
229
230#define random_ether_addr(addr) eth_random_addr(addr)
231
232
233
234
235
236
237
238static inline void eth_broadcast_addr(u8 *addr)
239{
240 memset(addr, 0xff, ETH_ALEN);
241}
242
243
244
245
246
247
248
249static inline void eth_zero_addr(u8 *addr)
250{
251 memset(addr, 0x00, ETH_ALEN);
252}
253
/**
 * eth_hw_addr_random - Generate software assigned random Ethernet and
 * set device flag
 * @dev: pointer to net_device structure
 *
 * Generate a random Ethernet address (MAC) to be used by a net device
 * and set addr_assign_type so the state can be read by sysfs and be
 * used by userspace.
 */
static inline void eth_hw_addr_random(struct net_device *dev)
{
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->dev_addr);
}
268
/**
 * eth_hw_addr_crc - Calculate crc from netdev_hw_addr
 * @ha: pointer to hardware address
 *
 * Calculate crc from the address stored in the hardware address list.
 */
static inline u32 eth_hw_addr_crc(struct netdev_hw_addr *ha)
{
	return ether_crc(ETH_ALEN, ha->addr);
}
279
/**
 * ether_addr_copy - Copy an Ethernet address
 * @dst: Pointer to a six-byte array Ethernet address destination
 * @src: Pointer to a six-byte array Ethernet address source
 *
 * Please note: dst & src must both be aligned to u16.
 */
static inline void ether_addr_copy(u8 *dst, const u8 *src)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* cheap unaligned access: copy as one u32 plus one u16 */
	*(u32 *)dst = *(const u32 *)src;
	*(u16 *)(dst + 4) = *(const u16 *)(src + 4);
#else
	/* copy as three half-words (relies on the u16 alignment guarantee) */
	u16 *a = (u16 *)dst;
	const u16 *b = (const u16 *)src;

	a[0] = b[0];
	a[1] = b[1];
	a[2] = b[2];
#endif
}
301
/**
 * eth_hw_addr_inherit - Copy dev_addr from another net_device
 * @dst: pointer to net_device to copy dev_addr to
 * @src: pointer to net_device to copy dev_addr from
 *
 * Copy the Ethernet address from one net_device to another along with
 * the address attributes (addr_assign_type).
 */
static inline void eth_hw_addr_inherit(struct net_device *dst,
				       struct net_device *src)
{
	dst->addr_assign_type = src->addr_assign_type;
	ether_addr_copy(dst->dev_addr, src->dev_addr);
}
316
/**
 * ether_addr_equal - Compare two Ethernet addresses
 * @addr1: Pointer to a six-byte array containing the Ethernet address
 * @addr2: Pointer other six-byte array containing the Ethernet address
 *
 * Compare two Ethernet addresses, returns true if equal
 *
 * Please note: addr1 & addr2 must both be aligned to u16.
 */
static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* XOR the 6 bytes as one u32 + one u16; any difference sets a bit */
	u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) |
		   ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4)));

	return fold == 0;
#else
	const u16 *a = (const u16 *)addr1;
	const u16 *b = (const u16 *)addr2;

	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
#endif
}
340
/**
 * ether_addr_equal_64bits - Compare two Ethernet addresses
 * @addr1: Pointer to an array of 8 bytes
 * @addr2: Pointer to an other array of 8 bytes
 *
 * Compare two Ethernet addresses, returns true if equal, false otherwise.
 *
 * The function doesn't need any conditional branches and possibly uses
 * word memory accesses on CPU allowing cheap unaligned memory reads.
 * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
 *
 * Please note that alignment of addr1 & addr2 are only guaranteed to be
 * 16 bits.
 */
static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
					   const u8 addr2[6+2])
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);

	/* the shift discards the two padding bytes from the comparison */
#ifdef __BIG_ENDIAN
	return (fold >> 16) == 0;
#else
	return (fold << 16) == 0;
#endif
#else
	return ether_addr_equal(addr1, addr2);
#endif
}
370
/**
 * ether_addr_equal_unaligned - Compare two not u16 aligned Ethernet addresses
 * @addr1: Pointer to a six-byte array containing the Ethernet address
 * @addr2: Pointer other six-byte array containing the Ethernet address
 *
 * Compare two Ethernet addresses, returns true if equal
 *
 * Please note: Use only when any Ethernet address may not be u16 aligned.
 */
static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return ether_addr_equal(addr1, addr2);
#else
	/* byte-wise compare is the only safe option without alignment */
	return memcmp(addr1, addr2, ETH_ALEN) == 0;
#endif
}
388
389
390
391
392
393
394
395
396
397
398
399static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
400 const u8 *mask)
401{
402 int i;
403
404 for (i = 0; i < ETH_ALEN; i++) {
405 if ((addr1[i] ^ addr2[i]) & mask[i])
406 return false;
407 }
408
409 return true;
410}
411
412
413
414
415
416
417
418static inline u64 ether_addr_to_u64(const u8 *addr)
419{
420 u64 u = 0;
421 int i;
422
423 for (i = 0; i < ETH_ALEN; i++)
424 u = u << 8 | addr[i];
425
426 return u;
427}
428
429
430
431
432
433
434static inline void u64_to_ether_addr(u64 u, u8 *addr)
435{
436 int i;
437
438 for (i = ETH_ALEN - 1; i >= 0; i--) {
439 addr[i] = u & 0xff;
440 u = u >> 8;
441 }
442}
443
444
445
446
447
448
449static inline void eth_addr_dec(u8 *addr)
450{
451 u64 u = ether_addr_to_u64(addr);
452
453 u--;
454 u64_to_ether_addr(u, addr);
455}
456
457
458
459
460
461static inline void eth_addr_inc(u8 *addr)
462{
463 u64 u = ether_addr_to_u64(addr);
464
465 u++;
466 u64_to_ether_addr(u, addr);
467}
468
/**
 * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
 * @dev: Pointer to a device structure
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
 * Compare passed address with all addresses of the device. Return true if the
 * address is one of the device addresses.
 *
 * Note that this function calls ether_addr_equal_64bits() so take care of
 * the right padding.
 */
static inline bool is_etherdev_addr(const struct net_device *dev,
				    const u8 addr[6 + 2])
{
	struct netdev_hw_addr *ha;
	bool res = false;

	/* the device address list is traversed under RCU protection */
	rcu_read_lock();
	for_each_dev_addr(dev, ha) {
		res = ether_addr_equal_64bits(addr, ha->addr);
		if (res)
			break;
	}
	rcu_read_unlock();
	return res;
}
495#endif
496
/**
 * compare_ether_header - Compare two Ethernet headers
 * @a: Pointer to Ethernet header
 * @b: Pointer to Ethernet header
 *
 * Compare two Ethernet headers, returns 0 if equal.
 * This assumes that the network header (i.e., IP header) is 4-byte
 * aligned OR the platform can handle unaligned access.  This is the
 * case for all packets coming into netif_receive_skb or similar
 * entry points.
 */
static inline unsigned long compare_ether_header(const void *a, const void *b)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	unsigned long fold;

	/*
	 * We want to compare 14 bytes:
	 *  [a0 ... a13] ^ [b0 ... b13]
	 * Use two long XOR, ORed together, with an overlap of two bytes.
	 *  [a0  a1  a2  a3  a4  a5  a6  a7 ] ^ [b0  b1  b2  b3  b4  b5  b6  b7 ] |
	 *  [a6  a7  a8  a9  a10 a11 a12 a13] ^ [b6  b7  b8  b9  b10 b11 b12 b13]
	 * This means the [a6 a7] ^ [b6 b7] part is done two times.
	 */
	fold = *(unsigned long *)a ^ *(unsigned long *)b;
	fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
	return fold;
#else
	/* compare as one u16 (the first two bytes) plus three u32 words */
	u32 *a32 = (u32 *)((u8 *)a + 2);
	u32 *b32 = (u32 *)((u8 *)b + 2);

	return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
	       (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
#endif
}
533
/**
 * eth_skb_pad - Pad buffer to mandatory minimum length
 * @skb: Buffer to pad
 *
 * An Ethernet frame should have a minimum size of 60 bytes.  This function
 * takes short frames and pads them with zeros up to the 60 byte limit
 * (ETH_ZLEN).  Returns 0 on success; on failure the skb is consumed by
 * skb_put_padto() and a negative errno is returned.
 */
static inline int eth_skb_pad(struct sk_buff *skb)
{
	return skb_put_padto(skb, ETH_ZLEN);
}
545
546#endif
547