#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

#include <net/xdp.h>

/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_DEFAULT_IRQ_WORK 256

/* ITR (Interrupt Throttle Rate) values.  The hardware ITR registers have
 * 2 usec resolution, so the values below are kept in usec and converted
 * when programmed; the top bit is used purely as a software flag marking
 * the setting as dynamic (adaptive).
 */
#define I40E_ITR_DYNAMIC 0x8000
#define I40E_ITR_MASK 0x1FFE
#define I40E_MIN_ITR 2
#define I40E_ITR_20K 50
#define I40E_ITR_8K 122
#define I40E_MAX_ITR 8160
#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))

#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)

/* INTRL_ENA (bit 6) must be set in the interrupt rate limit register
 * whenever a non-zero rate limit is programmed; the register itself has
 * 4 usec resolution.
 */
#define INTRL_ENA BIT(6)
#define I40E_MAX_INTRL 0x3B
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
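/**
 * i40e_intrl_usec_to_reg - convert an interrupt rate limit to register format
 * @intrl: interrupt rate limit in usec
 *
 * Converts a rate limit given in usec into the 4 usec units the register
 * expects and sets the enable bit; a limit below 4 usec disables rate
 * limiting entirely (returns 0 with the enable bit clear).  For example,
 * a 40 usec limit becomes (40 >> 2) | INTRL_ENA = 0x4A.
 */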
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}

#define I40E_QUEUE_END_OF_LIST 0x7FF
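/* These indices match the hardware's ITR_INDX fields in the DYN_CTLN and
 * QINT queue-interrupt registers.  I40E_ITR_NONE is not a real index; it
 * is the special "don't update any ITR" encoding.
 */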
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	 I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx buffer sizes (all multiples of 128) */
#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_1536 1536
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072
#define I40E_MAX_RXBUFFER 9728
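/* Only the packet headers are copied into the skb when the payload stays
 * in the page buffer, so 256 bytes of header space is enough and keeps the
 * skb truesize small.  i40e_rx_desc aliases the 16-byte descriptor layout
 * used by the Rx hot path.
 */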
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_16byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
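/* When build_skb is used with 2K receive buffers, whatever space is left
 * after the frame data, the shared info, and IP alignment is handed out
 * as headroom.  i40e_compute_pad()/i40e_skb_pad() work that value out for
 * the chosen buffer size; on systems where a 2K buffer is too small to
 * hold a padded standard frame, 3K buffers are used instead.
 */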
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer; the
	 * 3K case needs extra room because NET_IP_ALIGN may push us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
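/**
 * i40e_test_staterr - test bits in the Rx descriptor status/error field
 * @rx_desc: pointer to the receive descriptor (writeback format)
 * @stat_err_bits: bits to test against, in CPU byte order
 *
 * Returns true if any of the given status/error bits are set in the
 * descriptor.  The constant mask is converted to little endian here so
 * the per-packet hot path avoids a byte swap of the descriptor word.
 */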
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}

/* How many Rx buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 32	/* Must be power of 2 */

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17

/* A single Tx descriptor can hold at most 16K - 1 bytes.  To stay aligned
 * with the 4K maximum read request size, the usable amount per descriptor
 * is rounded down to the nearest 4K boundary.
 */
#define I40E_MAX_READ_REQ_SIZE 4096
#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
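/**
 * i40e_txd_use_count - estimate the number of Tx descriptors needed
 * @size: transmit request size in bytes
 *
 * Hardware alignment restrictions mean we conservatively assume at most
 * 12K of data per descriptor, even though a descriptor can hold up to
 * 16K - 1 bytes.  Instead of dividing by 12K, ((size * 85) >> 20) + 1
 * approximates size / 12K with a cheap multiply and shift: /4K is >>12,
 * /3 is roughly (*85)>>8, combined into the single >>20, plus one to
 * round up.  For example, a 64KB fragment gives (65536 * 85) >> 20 = 5,
 * so 6 descriptors are reserved for it.
 */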
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}

/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)

#define I40E_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO BIT(3)
#define I40E_TX_FLAGS_IPV4 BIT(4)
#define I40E_TX_FLAGS_IPV6 BIT(5)
#define I40E_TX_FLAGS_TSYN BIT(8)
#define I40E_TX_FLAGS_FD_SB BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define I40E_TX_FLAGS_VLAN_SHIFT 16

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	__u32 page_offset;
	__u16 pagecnt_bias;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	int prev_pkt_ctr;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
	__I40E_RING_STATE_NBITS
};

/* some useful defines for the virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_HEADER_SPLIT 1
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP 0x8
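/* struct that defines a descriptor ring, associated with a VSI */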
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct bpf_prog *xdp_prog;
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
		struct xdp_buff **rx_bi_zc;
	};
	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* Stores the USER ITR setting: the high bit marks it as dynamic
	 * (adaptive), and since the hardware only supports 2 usec
	 * resolution the value is converted before being written to a
	 * register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 xdp_tx_active;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define I40E_TXR_FLAGS_XDP			BIT(2)

	/* stats structs */
	struct i40e_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When the Rx clean routine must
					 * return before it sees the EOP for
					 * the current packet, the partial skb
					 * is saved here and receive resumes
					 * the next time this ring is polled.
					 */

	struct i40e_channel *ch;
	u16 rx_offset;
	struct xdp_rxq_info xdp_rxq;
	struct xsk_buff_pool *xsk_pool;
	struct xdp_desc *xsk_descs;	/* scratch array for batched XSK Tx */
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
	ring->flags |= I40E_TXR_FLAGS_XDP;
}

#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002
#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002
#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e
#define I40E_ITR_ADAPTIVE_LATENCY 0x8000
#define I40E_ITR_ADAPTIVE_BULK 0x0000

struct i40e_ring_container {
	struct i40e_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;			/* total number of rings in vector */
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
			  struct net_device *sb_dev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);
int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);
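/**
 * i40e_get_head - retrieve the head from the head writeback location
 * @tx_ring: Tx ring to fetch head of
 *
 * The hardware writes the ring head index into the slot just past the
 * last descriptor; read it from there instead of from a register.
 */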
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}
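/**
 * i40e_xmit_descriptor_count - calculate the descriptors needed for an skb
 * @skb: send buffer
 *
 * Walks the linear data and every fragment, summing how many descriptors
 * each piece needs (one per 12K chunk, see i40e_txd_use_count()), and
 * returns the total number of data descriptors the skb will consume.
 */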
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
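/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed.  The fast path only checks the cached
 * free-descriptor count; the slow path (__i40e_maybe_stop_tx()) actually
 * stops the queue and re-checks under the proper memory barriers.
 */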
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
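/**
 * i40e_chk_linearize - check if we need to linearize a given skb
 * @skb: send buffer
 * @count: number of data descriptors the skb will occupy
 *
 * The hardware can only chain up to 8 buffers per packet (or per MSS-sized
 * segment for TSO), so a frame whose fragment layout would exceed that
 * must be linearized first.  Returns true if linearization is required.
 *
 * Illustrative call sequence (a sketch of what the transmit path in
 * i40e_txrx.c does, not a verbatim copy):
 *
 *	count = i40e_xmit_descriptor_count(skb);
 *	if (i40e_chk_linearize(skb, count)) {
 *		if (__skb_linearize(skb))
 *			goto out_drop;
 *		count = i40e_txd_use_count(skb->len);
 *	}
 *	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1))
 *		return NETDEV_TX_BUSY;
 */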
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}
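/**
 * txring_txq - helper to convert from a ring to a netdev Tx queue
 * @ring: Tx ring to find the netdev queue for
 */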
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */