// SPDX-License-Identifier: GPL-2.0
/* Copyright (c)  2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/aer.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/ipv6.h>

#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"
#include "igc_xdp.h"

#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define IGC_XDP_PASS		0
#define IGC_XDP_CONSUMED	BIT(0)
#define IGC_XDP_TX		BIT(1)
#define IGC_XDP_REDIRECT	BIT(2)
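/* The IGC_XDP_* values above form a bitmask: PASS is zero, while CONSUMED,
 * TX and REDIRECT occupy distinct bits so per-packet verdicts can be OR-ed
 * into a single xdp_status word and flushed once per NAPI poll in
 * igc_finalize_xdp().
 */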

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";

static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * igc_reset - Reinitialize the hardware and restore runtime settings
 * @adapter: board private structure
 */
void igc_reset(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU if required */
	pba = IGC_PBA_34K;

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the XOFF of flow control.  This could be
	 * - 90% of the link speed multiplied by the time required to
	 *   transmit a full frame.
	 */
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		netdev_err(dev, "Error on hardware initialization\n");

	/* Re-establish EEE setting */
	igc_set_eee_i225(hw, true, true, true);

	if (!netif_running(adapter->netdev))
		igc_power_down_phy_copper_base(&adapter->hw);

	/* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
	wr32(IGC_VET, ETH_P_8021Q);

	/* Re-enable PTP, where applicable. */
	igc_ptp_reset(adapter);

	/* Re-enable TSN offloading, where applicable. */
	igc_tsn_offload_apply(adapter);

	igc_get_phy_info(hw);
}

/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}

/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	if (!pci_device_is_present(adapter->pdev))
		return;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
			 dma_unmap_len(buf, len), DMA_TO_DEVICE);

	dma_unmap_len_set(buf, len, 0);
}

/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
	u32 xsk_frames = 0;

	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		switch (tx_buffer->type) {
		case IGC_TX_BUFFER_TYPE_XSK:
			xsk_frames++;
			break;
		case IGC_TX_BUFFER_TYPE_XDP:
			xdp_return_frame(tx_buffer->xdpf);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		case IGC_TX_BUFFER_TYPE_SKB:
			dev_kfree_skb_any(tx_buffer->skb);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		default:
			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
			break;
		}

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
		}

		tx_buffer->next_to_watch = NULL;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	if (tx_ring->xsk_pool && xsk_frames)
		xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources.
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources.
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igc_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
	struct net_device *ndev = tx_ring->netdev;
	struct device *dev = tx_ring->dev;
	int size = 0;

	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igc_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Tx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}
371
372static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
373{
374 u16 i = rx_ring->next_to_clean;
375
376 dev_kfree_skb(rx_ring->skb);
377 rx_ring->skb = NULL;
378
379
380 while (i != rx_ring->next_to_alloc) {
381 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
382
383
384
385
386 dma_sync_single_range_for_cpu(rx_ring->dev,
387 buffer_info->dma,
388 buffer_info->page_offset,
389 igc_rx_bufsz(rx_ring),
390 DMA_FROM_DEVICE);
391
392
393 dma_unmap_page_attrs(rx_ring->dev,
394 buffer_info->dma,
395 igc_rx_pg_size(rx_ring),
396 DMA_FROM_DEVICE,
397 IGC_RX_DMA_ATTR);
398 __page_frag_cache_drain(buffer_info->page,
399 buffer_info->pagecnt_bias);
400
401 i++;
402 if (i == rx_ring->count)
403 i = 0;
404 }
405}
406
407static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
408{
409 struct igc_rx_buffer *bi;
410 u16 i;
411
412 for (i = 0; i < ring->count; i++) {
413 bi = &ring->rx_buffer_info[i];
414 if (!bi->xdp)
415 continue;
416
417 xsk_buff_free(bi->xdp);
418 bi->xdp = NULL;
419 }
420}
421
422
423
424
425
426static void igc_clean_rx_ring(struct igc_ring *ring)
427{
428 if (ring->xsk_pool)
429 igc_clean_rx_ring_xsk_pool(ring);
430 else
431 igc_clean_rx_ring_page_shared(ring);
432
433 clear_ring_uses_large_buffer(ring);
434
435 ring->next_to_alloc = 0;
436 ring->next_to_clean = 0;
437 ring->next_to_use = 0;
438}
439
440
441
442
443
444static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
445{
446 int i;
447
448 for (i = 0; i < adapter->num_rx_queues; i++)
449 if (adapter->rx_ring[i])
450 igc_clean_rx_ring(adapter->rx_ring[i]);
451}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources.
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
	igc_clean_rx_ring(rx_ring);

	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources.
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
	struct net_device *ndev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	u8 index = rx_ring->queue_index;
	int size, desc_len, res;

	res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
			       rx_ring->q_vector->napi.napi_id);
	if (res < 0) {
		netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
			   index);
		return res;
	}

	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union igc_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igc_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Rx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
					      struct igc_ring *ring)
{
	if (!igc_xdp_is_enabled(adapter) ||
	    !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
		return NULL;

	return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
}
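/* A ring only gets an AF_XDP zero-copy buffer pool when an XDP program is
 * loaded *and* user space has bound an XSK socket to this queue index
 * (which sets IGC_RING_FLAG_AF_XDP_ZC); otherwise the driver falls back to
 * its page-shared receive buffers.
 */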

/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;
	u32 buf_size;

	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
	if (ring->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_PAGE_SHARED,
						   NULL));
	}

	if (igc_xdp_is_enabled(adapter))
		set_ring_uses_large_buffer(ring);

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* reset next-to-use/clean to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	if (ring->xsk_pool)
		buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
	else if (ring_uses_large_buffer(ring))
		buf_size = IGC_RXBUFFER_3072;
	else
		buf_size = IGC_RXBUFFER_2048;

	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
	srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

	wr32(IGC_SRRCTL(reg_idx), srrctl);

	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}

/**
 * igc_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx(struct igc_adapter *adapter)
{
	int i;

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;

	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);

	/* disable the queue */
	wr32(IGC_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(IGC_TDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_tx_desc));
	wr32(IGC_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(IGC_TDBAH(reg_idx), tdba >> 32);

	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
	wr32(IGC_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGC_TX_PTHRESH;
	txdctl |= IGC_TX_HTHRESH << 8;
	txdctl |= IGC_TX_WTHRESH << 16;

	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	wr32(IGC_TXDCTL(reg_idx), txdctl);
}

/**
 * igc_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */
static void igc_configure_tx(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 j, num_rx_queues;
	u32 mrqc, rxcsum;
	u32 rss_key[10];

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (j = 0; j < 10; j++)
		wr32(IGC_RSSRK(j), rss_key[j]);

	num_rx_queues = adapter->rss_queues;

	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGC_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
				(j * num_rx_queues) / IGC_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
	igc_write_rss_indir_tbl(adapter);

	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(IGC_RXCSUM);
	rxcsum |= IGC_RXCSUM_PCSD;

	/* Enable Receive Checksum Offload for SCTP */
	rxcsum |= IGC_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(IGC_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6 |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;

	wr32(IGC_MRQC, mrqc);
}

/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable canonical form */
	}

	wr32(IGC_RCTL, rctl);
}

/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default */
	wr32(IGC_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

	/* Enable transmits */
	tctl |= IGC_TCTL_EN;

	wr32(IGC_TCTL, tctl);
}

/**
 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be set
 * @index: Filter index
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *	matching the filter are enqueued onto 'queue'. Otherwise, queue
 *	assignment is disabled.
 */
static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
				  enum igc_mac_filter_type type,
				  const u8 *addr, int queue)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 ral, rah;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	ral = le32_to_cpup((__le32 *)(addr));
	rah = le16_to_cpup((__le16 *)(addr + 4));

	if (type == IGC_MAC_FILTER_TYPE_SRC) {
		rah &= ~IGC_RAH_ASEL_MASK;
		rah |= IGC_RAH_ASEL_SRC_ADDR;
	}

	if (queue >= 0) {
		rah &= ~IGC_RAH_QSEL_MASK;
		rah |= (queue << IGC_RAH_QSEL_SHIFT);
		rah |= IGC_RAH_QSEL_ENABLE;
	}

	rah |= IGC_RAH_AV;

	wr32(IGC_RAL(index), ral);
	wr32(IGC_RAH(index), rah);

	netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
}

/**
 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be cleared
 * @index: Filter index
 */
static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	wr32(IGC_RAL(index), 0);
	wr32(IGC_RAH(index), 0);

	netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}

/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	u8 *addr = adapter->hw.mac.addr;

	netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);

	igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}

/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igc_set_default_mac_filter(adapter);

	return 0;
}

/**
 * igc_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *	0 on no addresses written
 *	X on writing X addresses to MTA
 **/
static int igc_write_mc_addr_list(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igc_update_mc_addr_list(hw, NULL, 0);
		return 0;
	}

	mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igc_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
{
	ktime_t cycle_time = adapter->cycle_time;
	ktime_t base_time = adapter->base_time;
	u32 launchtime;

	/* FIXME: when using ETF together with taprio, we may have a
	 * case where 'delta' is larger than the cycle_time, this may
	 * cause problems if we don't read the current value of
	 * IGC_BASET, as the value written into the launchtime
	 * descriptor field may be misinterpreted.
	 */
	div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);

	return cpu_to_le32(launchtime);
}
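/* The computation above reduces the absolute transmit time to an offset
 * within the current Qbv cycle, since the LaunchTime descriptor field is
 * interpreted relative to the cycle start.  For example (illustrative
 * numbers only): with base_time = 0 and cycle_time = 1000000 ns, a
 * requested txtime of 2500000 ns yields launchtime = 500000 ns, i.e.
 * halfway into the cycle.
 */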

static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
			    struct igc_tx_buffer *first,
			    u32 vlan_macip_lens, u32 type_tucmd,
			    u32 mss_l4len_idx)
{
	struct igc_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGC_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	/* For i225, context index must be unique per ring. */
	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

	/* We assume there is always a valid Tx time available. Invalid
	 * times should have been handled by the upper layers.
	 */
	if (tx_ring->launchtime_enable) {
		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
		ktime_t txtime = first->skb->tstamp;

		skb_txtime_consumed(first->skb);
		context_desc->launch_time = igc_tx_launchtime(adapter,
							      txtime);
	} else {
		context_desc->launch_time = 0;
	}
}

static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
		    !tx_ring->launchtime_enable)
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (skb_csum_is_sctp(skb)) {
			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGC_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
}

static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* memory barrier: make the stopped state visible before
	 * re-checking for free descriptors
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	if (igc_desc_unused(tx_ring) >= size)
		return 0;
	return __igc_maybe_stop_tx(tx_ring, size);
}

#define IGC_SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ?				\
	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) :	\
	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
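/* IGC_SET_FLAG() translates a bit in _input from position _flag to position
 * _result without branching: because both masks are single bits (powers of
 * two), multiplying or dividing by their ratio shifts the tested bit into
 * place.  E.g. with _flag = BIT(1) and _result = BIT(30), an input that has
 * BIT(1) set yields BIT(1) * (BIT(30) / BIT(1)) = BIT(30), and 0 otherwise.
 */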

static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
		       IGC_ADVTXD_DCMD_DEXT |
		       IGC_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
				 IGC_ADVTXD_DCMD_VLE);

	/* set segmentation bits for TSO */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
				 (IGC_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
				 (IGC_ADVTXD_MAC_TSTAMP));

	/* insert frame checksum */
	cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);

	return cmd_type;
}

static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
				 union igc_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;

	/* insert L4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
			 ((IGC_TXD_POPTS_TXSM << 8) /
			  IGC_TX_FLAGS_CSUM);

	/* insert IPv4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
			 (((IGC_TXD_POPTS_IXSM << 8)) /
			  IGC_TX_FLAGS_IPV4);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static int igc_tx_map(struct igc_ring *tx_ring,
		      struct igc_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 tx_flags = first->tx_flags;
	skb_frag_t *frag;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	dma_addr_t dma;
	u32 cmd_type;

	cmd_type = igc_tx_cmd_type(skb, tx_flags);
	tx_desc = IGC_TX_DESC(tx_ring, i);

	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGC_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGC_MAX_DATA_PER_TXD;
			size -= IGC_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGC_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGC_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return 0;
dma_error:
	netdev_err(tx_ring->netdev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}

static int igc_tso(struct igc_ring *tx_ring,
		   struct igc_tx_buffer *first,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM |
				   IGC_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
		/* compute length of segmentation header */
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
	} else {
		/* compute length of segmentation header */
		*hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
	}

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
			type_tucmd, mss_l4len_idx);

	return 1;
}
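/* Worked example of the context-descriptor packing above (illustrative
 * numbers only): for a TCP segment with a 14-byte Ethernet header, 20-byte
 * IPv4 header, 20-byte TCP header and gso_size of 1448, l4_offset is 34,
 * hdr_len becomes 20 + 34 = 54, the L4LEN field carries 20, the MSS field
 * carries 1448, and bytecount grows by (gso_segs - 1) * 54 to account for
 * the headers the hardware replicates onto every segment.
 */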

static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
				       struct igc_ring *tx_ring)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	struct igc_tx_buffer *first;
	u32 tx_flags = 0;
	unsigned short f;
	u8 hdr_len = 0;
	int tso = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
	 *	+ 2 desc gap to keep tail from touching head,
	 *	+ 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(
						&skb_shinfo(skb)->frags[f]));

	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->type = IGC_TX_BUFFER_TYPE_SKB;
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);

		/* FIXME: add support for retrieving timestamps from
		 * the other timer registers before skipping the
		 * timestamping request.
		 */
		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
		    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGC_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
		} else {
			adapter->tx_hwtstamp_skipped++;
		}
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGC_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igc_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igc_tx_csum(tx_ring, first);

	igc_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}

static void igc_rx_checksum(struct igc_ring *ring,
			    union igc_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igc_test_staterr(rx_desc,
			     IGC_RXDEXT_STATERR_L4E |
			     IGC_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets
		 */
		if (!(skb->len == 60 &&
		      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
				      IGC_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
		   le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igc_rx_hash(struct igc_ring *ring,
			       union igc_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
			     PKT_HASH_TYPE_L3);
}

static void igc_rx_vlan(struct igc_ring *rx_ring,
			union igc_adv_rx_desc *rx_desc,
			struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	u16 vid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
		if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
		    test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
			vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}

/**
 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
static void igc_process_skb_fields(struct igc_ring *rx_ring,
				   union igc_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	igc_rx_hash(rx_ring, rx_desc, skb);

	igc_rx_checksum(rx_ring, rx_desc, skb);

	igc_rx_vlan(rx_ring, rx_desc, skb);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = rd32(IGC_CTRL);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl |= IGC_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~IGC_CTRL_VME;
	}
	wr32(IGC_CTRL, ctrl);
}

static void igc_restore_vlan(struct igc_adapter *adapter)
{
	igc_vlan_mode(adapter->netdev, adapter->netdev->features);
}

static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
					       const unsigned int size,
					       int *rx_buffer_pgcnt)
{
	struct igc_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buffer->page);
#else
		0;
#endif
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
			       unsigned int truesize)
{
#if (PAGE_SIZE < 8192)
	buffer->page_offset ^= truesize;
#else
	buffer->page_offset += truesize;
#endif
}
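/* On systems with 4K pages each Rx page is split into two half-page
 * buffers, so truesize is half of igc_rx_pg_size() and the XOR above
 * simply ping-pongs page_offset between the two halves (e.g. 0 <-> 2048).
 * On larger page sizes buffers are carved out sequentially instead, so
 * the offset advances by truesize until the page is exhausted.
 */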

static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
					      unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = igc_rx_pg_size(ring) / 2;
#else
	truesize = ring_uses_build_skb(ring) ?
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
		   SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 */
static void igc_add_rx_frag(struct igc_ring *rx_ring,
			    struct igc_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	truesize = ring_uses_build_skb(rx_ring) ?
		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
		   SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	igc_rx_buffer_flip(rx_buffer, truesize);
}

static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
				     struct igc_rx_buffer *rx_buffer,
				     union igc_adv_rx_desc *rx_desc,
				     unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(va);

	/* build an skb around the page buffer */
	skb = build_skb(va - IGC_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, IGC_SKB_PAD);
	__skb_put(skb, size);

	igc_rx_buffer_flip(rx_buffer, truesize);
	return skb;
}

static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
					 struct igc_rx_buffer *rx_buffer,
					 struct xdp_buff *xdp,
					 ktime_t timestamp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
	void *va = xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(va);

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
	if (unlikely(!skb))
		return NULL;

	if (timestamp)
		skb_hwtstamps(skb)->hwtstamp = timestamp;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGC_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(va + headlen) - page_address(rx_buffer->page),
				size, truesize);
		igc_rx_buffer_flip(rx_buffer, truesize);
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

/**
 * igc_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void igc_reuse_rx_page(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *old_buff)
{
	u16 nta = rx_ring->next_to_alloc;
	struct igc_rx_buffer *new_buff;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma		= old_buff->dma;
	new_buff->page		= old_buff->page;
	new_buff->page_offset	= old_buff->page_offset;
	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
}

static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
				  int rx_buffer_pgcnt)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define IGC_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)

	if (rx_buffer->page_offset > IGC_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
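/* The pagecnt_bias scheme keeps page recycling cheap: the driver takes a
 * large batch of page references up front (USHRT_MAX) and "spends" one per
 * received buffer by decrementing the bias instead of touching the atomic
 * page refcount.  A page is safe to reuse when the real refcount minus the
 * remaining bias shows the driver as the only owner.
 */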

/**
 * igc_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool igc_is_non_eop(struct igc_ring *rx_ring,
			   union igc_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IGC_RX_DESC(rx_ring, ntc));

	if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
		return false;

	return true;
}

/**
 * igc_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool igc_cleanup_headers(struct igc_ring *rx_ring,
				union igc_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

static void igc_put_rx_buffer(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *rx_buffer,
			      int rx_buffer_pgcnt)
{
	if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
		/* hand second half of page back to the ring */
		igc_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
{
	struct igc_adapter *adapter = rx_ring->q_vector->adapter;

	if (ring_uses_build_skb(rx_ring))
		return IGC_SKB_PAD;
	if (igc_xdp_is_enabled(adapter))
		return XDP_PACKET_HEADROOM;

	return 0;
}

static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
				  struct igc_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 igc_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IGC_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = igc_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring
 * @cleaned_count: number of buffers to clean
 */
static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
{
	union igc_adv_rx_desc *rx_desc;
	u16 i = rx_ring->next_to_use;
	struct igc_rx_buffer *bi;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGC_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	bufsz = igc_rx_bufsz(rx_ring);

	do {
		if (!igc_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGC_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}
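/* Note the "i -= rx_ring->count" trick used above: the loop runs with an
 * offset index so that wrap-around can be detected with a simple
 * "if (unlikely(!i))" zero test instead of comparing against ring->count
 * on every iteration; the real index is recovered at the end by adding
 * ring->count back.
 */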

static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
{
	union igc_adv_rx_desc *desc;
	u16 i = ring->next_to_use;
	struct igc_rx_buffer *bi;
	dma_addr_t dma;
	bool ok = true;

	if (!count)
		return ok;

	desc = IGC_RX_DESC(ring, i);
	bi = &ring->rx_buffer_info[i];
	i -= ring->count;

	do {
		bi->xdp = xsk_buff_alloc(ring->xsk_pool);
		if (!bi->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(bi->xdp);
		desc->read.pkt_addr = cpu_to_le64(dma);

		desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			desc = IGC_RX_DESC(ring, 0);
			bi = ring->rx_buffer_info;
			i -= ring->count;
		}

		/* Clear the length for the next_to_use descriptor. */
		desc->wb.upper.length = 0;

		count--;
	} while (count);

	i += ring->count;

	if (ring->next_to_use != i) {
		ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, ring->tail);
	}

	return ok;
}

static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
				  struct xdp_frame *xdpf,
				  struct igc_ring *ring)
{
	dma_addr_t dma;

	dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ring->dev, dma)) {
		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
		return -ENOMEM;
	}

	buffer->type = IGC_TX_BUFFER_TYPE_XDP;
	buffer->xdpf = xdpf;
	buffer->protocol = 0;
	buffer->bytecount = xdpf->len;
	buffer->gso_segs = 1;
	buffer->time_stamp = jiffies;
	dma_unmap_len_set(buffer, len, xdpf->len);
	dma_unmap_addr_set(buffer, dma, dma);
	return 0;
}

/* This function requires __netif_tx_lock is held by the caller. */
static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
				      struct xdp_frame *xdpf)
{
	struct igc_tx_buffer *buffer;
	union igc_adv_tx_desc *desc;
	u32 cmd_type, olinfo_status;
	int err;

	if (!igc_desc_unused(ring))
		return -EBUSY;

	buffer = &ring->tx_buffer_info[ring->next_to_use];
	err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
	if (err)
		return err;

	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
		   buffer->bytecount;
	olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;

	desc = IGC_TX_DESC(ring, ring->next_to_use);
	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));

	netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);

	buffer->next_to_watch = desc;

	ring->next_to_use++;
	if (ring->next_to_use == ring->count)
		ring->next_to_use = 0;

	return 0;
}

static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
					    int cpu)
{
	int index = cpu;

	if (unlikely(index < 0))
		index = 0;

	while (index >= adapter->num_tx_queues)
		index -= adapter->num_tx_queues;

	return adapter->tx_ring[index];
}

static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	struct igc_ring *ring;
	int res;

	if (unlikely(!xdpf))
		return -EFAULT;

	ring = igc_xdp_get_tx_ring(adapter, cpu);
	nq = txring_txq(ring);

	__netif_tx_lock(nq, cpu);
	res = igc_xdp_init_tx_descriptor(ring, xdpf);
	__netif_tx_unlock(nq);
	return res;
}

/* This function assumes rcu_read_lock() is held by the caller. */
static int __igc_xdp_run_prog(struct igc_adapter *adapter,
			      struct bpf_prog *prog,
			      struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		return IGC_XDP_PASS;
	case XDP_TX:
		if (igc_xdp_xmit_back(adapter, xdp) < 0)
			goto out_failure;
		return IGC_XDP_TX;
	case XDP_REDIRECT:
		if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
			goto out_failure;
		return IGC_XDP_REDIRECT;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(adapter->netdev, prog, act);
		fallthrough;
	case XDP_DROP:
		return IGC_XDP_CONSUMED;
	}
}

static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
					struct xdp_buff *xdp)
{
	struct bpf_prog *prog;
	int res;

	prog = READ_ONCE(adapter->xdp_prog);
	if (!prog) {
		res = IGC_XDP_PASS;
		goto out;
	}

	res = __igc_xdp_run_prog(adapter, prog, xdp);

out:
	return ERR_PTR(-res);
}
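/* igc_xdp_run_prog() encodes the XDP verdict into an error pointer:
 * IGC_XDP_PASS is 0, so ERR_PTR(0) is NULL and the caller goes on to build
 * a real skb, while any non-pass verdict comes back as a negative error
 * pointer that igc_clean_rx_irq() decodes with -PTR_ERR(skb) to recover
 * the IGC_XDP_* bit.
 */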

/* This function assumes __netif_tx_lock is held by the caller. */
static void igc_flush_tx_descriptors(struct igc_ring *ring)
{
	/* Once tail pointer is updated, hardware can fetch the descriptors
	 * any time so we issue a write membar here to ensure all memory
	 * writes are complete before the tail pointer is updated.
	 */
	wmb();
	writel(ring->next_to_use, ring->tail);
}

static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
{
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	struct igc_ring *ring;

	if (status & IGC_XDP_TX) {
		ring = igc_xdp_get_tx_ring(adapter, cpu);
		nq = txring_txq(ring);

		__netif_tx_lock(nq, cpu);
		igc_flush_tx_descriptors(ring);
		__netif_tx_unlock(nq);
	}

	if (status & IGC_XDP_REDIRECT)
		xdp_do_flush();
}

static void igc_update_rx_stats(struct igc_q_vector *q_vector,
				unsigned int packets, unsigned int bytes)
{
	struct igc_ring *ring = q_vector->rx.ring;

	u64_stats_update_begin(&ring->rx_syncp);
	ring->rx_stats.packets += packets;
	ring->rx_stats.bytes += bytes;
	u64_stats_update_end(&ring->rx_syncp);

	q_vector->rx.total_packets += packets;
	q_vector->rx.total_bytes += bytes;
}

static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
{
	unsigned int total_bytes = 0, total_packets = 0;
	struct igc_adapter *adapter = q_vector->adapter;
	struct igc_ring *rx_ring = q_vector->rx.ring;
	struct sk_buff *skb = rx_ring->skb;
	u16 cleaned_count = igc_desc_unused(rx_ring);
	int xdp_status = 0, rx_buffer_pgcnt;

	while (likely(total_packets < budget)) {
		union igc_adv_rx_desc *rx_desc;
		struct igc_rx_buffer *rx_buffer;
		unsigned int size, truesize;
		ktime_t timestamp = 0;
		struct xdp_buff xdp;
		int pkt_offset = 0;
		void *pktbuf;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
			igc_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
		truesize = igc_get_rx_frame_truesize(rx_ring, size);

		pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;

		if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
			timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
							pktbuf);
			pkt_offset = IGC_TS_HDR_LEN;
			size -= IGC_TS_HDR_LEN;
		}

		if (!skb) {
			xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
			xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
					 igc_rx_offset(rx_ring) + pkt_offset,
					 size, false);

			skb = igc_xdp_run_prog(adapter, &xdp);
		}

		if (IS_ERR(skb)) {
			unsigned int xdp_res = -PTR_ERR(skb);

			switch (xdp_res) {
			case IGC_XDP_CONSUMED:
				rx_buffer->pagecnt_bias++;
				break;
			case IGC_XDP_TX:
			case IGC_XDP_REDIRECT:
				igc_rx_buffer_flip(rx_buffer, truesize);
				xdp_status |= xdp_res;
				break;
			}

			total_packets++;
			total_bytes += size;
		} else if (skb)
			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
		else
			skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
						timestamp);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (igc_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		igc_process_skb_fields(rx_ring, rx_desc, skb);

		napi_gro_receive(&q_vector->napi, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	if (xdp_status)
		igc_finalize_xdp(adapter, xdp_status);

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	igc_update_rx_stats(q_vector, total_packets, total_bytes);

	if (cleaned_count)
		igc_alloc_rx_buffers(rx_ring, cleaned_count);

	return total_packets;
}

static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
					    struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	unsigned int totalsize = metasize + datasize;
	struct sk_buff *skb;

	skb = __napi_alloc_skb(&ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
	memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;
}

static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
				union igc_adv_rx_desc *desc,
				struct xdp_buff *xdp,
				ktime_t timestamp)
{
	struct igc_ring *ring = q_vector->rx.ring;
	struct sk_buff *skb;

	skb = igc_construct_skb_zc(ring, xdp);
	if (!skb) {
		ring->rx_stats.alloc_failed++;
		return;
	}

	if (timestamp)
		skb_hwtstamps(skb)->hwtstamp = timestamp;

	if (igc_cleanup_headers(ring, desc, skb))
		return;

	igc_process_skb_fields(ring, desc, skb);
	napi_gro_receive(&q_vector->napi, skb);
}
2477
2478static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
2479{
2480 struct igc_adapter *adapter = q_vector->adapter;
2481 struct igc_ring *ring = q_vector->rx.ring;
2482 u16 cleaned_count = igc_desc_unused(ring);
2483 int total_bytes = 0, total_packets = 0;
2484 u16 ntc = ring->next_to_clean;
2485 struct bpf_prog *prog;
2486 bool failure = false;
2487 int xdp_status = 0;
2488
2489 rcu_read_lock();
2490
2491 prog = READ_ONCE(adapter->xdp_prog);
2492
2493 while (likely(total_packets < budget)) {
2494 union igc_adv_rx_desc *desc;
2495 struct igc_rx_buffer *bi;
2496 ktime_t timestamp = 0;
2497 unsigned int size;
2498 int res;
2499
2500 desc = IGC_RX_DESC(ring, ntc);
2501 size = le16_to_cpu(desc->wb.upper.length);
2502 if (!size)
2503 break;
2504
2505
2506
2507
2508
2509 dma_rmb();
2510
2511 bi = &ring->rx_buffer_info[ntc];
2512
2513 if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
2514 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
2515 bi->xdp->data);
2516
2517 bi->xdp->data += IGC_TS_HDR_LEN;
2518
2519
2520
2521
2522 bi->xdp->data_meta += IGC_TS_HDR_LEN;
2523 size -= IGC_TS_HDR_LEN;
2524 }
2525
2526 bi->xdp->data_end = bi->xdp->data + size;
2527 xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
2528
2529 res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
2530 switch (res) {
2531 case IGC_XDP_PASS:
2532 igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
2533 fallthrough;
2534 case IGC_XDP_CONSUMED:
2535 xsk_buff_free(bi->xdp);
2536 break;
2537 case IGC_XDP_TX:
2538 case IGC_XDP_REDIRECT:
2539 xdp_status |= res;
2540 break;
2541 }
2542
2543 bi->xdp = NULL;
2544 total_bytes += size;
2545 total_packets++;
2546 cleaned_count++;
2547 ntc++;
2548 if (ntc == ring->count)
2549 ntc = 0;
2550 }
2551
2552 ring->next_to_clean = ntc;
2553 rcu_read_unlock();
2554
2555 if (cleaned_count >= IGC_RX_BUFFER_WRITE)
2556 failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count);
2557
2558 if (xdp_status)
2559 igc_finalize_xdp(adapter, xdp_status);
2560
2561 igc_update_rx_stats(q_vector, total_packets, total_bytes);
2562
2563 if (xsk_uses_need_wakeup(ring->xsk_pool)) {
2564 if (failure || ring->next_to_clean == ring->next_to_use)
2565 xsk_set_rx_need_wakeup(ring->xsk_pool);
2566 else
2567 xsk_clear_rx_need_wakeup(ring->xsk_pool);
2568 return total_packets;
2569 }
2570
2571 return failure ? budget : total_packets;
2572}
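
/* Note on the return convention above: when the pool does not use the
 * need_wakeup protocol, an allocation failure makes this function return
 * the full budget, which igc_poll() interprets as "work remains", so NAPI
 * keeps polling and the buffer refill is retried on the next pass.
 */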

static void igc_update_tx_stats(struct igc_q_vector *q_vector,
				unsigned int packets, unsigned int bytes)
{
	struct igc_ring *ring = q_vector->tx.ring;

	u64_stats_update_begin(&ring->tx_syncp);
	ring->tx_stats.bytes += bytes;
	ring->tx_stats.packets += packets;
	u64_stats_update_end(&ring->tx_syncp);

	q_vector->tx.total_bytes += bytes;
	q_vector->tx.total_packets += packets;
}

static void igc_xdp_xmit_zc(struct igc_ring *ring)
{
	struct xsk_buff_pool *pool = ring->xsk_pool;
	struct netdev_queue *nq = txring_txq(ring);
	union igc_adv_tx_desc *tx_desc = NULL;
	int cpu = smp_processor_id();
	u16 ntu = ring->next_to_use;
	struct xdp_desc xdp_desc;
	u16 budget;

	if (!netif_carrier_ok(ring->netdev))
		return;

	__netif_tx_lock(nq, cpu);

	budget = igc_desc_unused(ring);

	while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
		u32 cmd_type, olinfo_status;
		struct igc_tx_buffer *bi;
		dma_addr_t dma;

		cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
			   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
			   xdp_desc.len;
		olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;

		dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);

		tx_desc = IGC_TX_DESC(ring, ntu);
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		bi = &ring->tx_buffer_info[ntu];
		bi->type = IGC_TX_BUFFER_TYPE_XSK;
		bi->protocol = 0;
		bi->bytecount = xdp_desc.len;
		bi->gso_segs = 1;
		bi->time_stamp = jiffies;
		bi->next_to_watch = tx_desc;

		netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);

		ntu++;
		if (ntu == ring->count)
			ntu = 0;
	}

	ring->next_to_use = ntu;
	if (tx_desc) {
		igc_flush_tx_descriptors(ring);
		xsk_tx_release(pool);
	}

	__netif_tx_unlock(nq);
}
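
/* igc_xdp_xmit_zc() runs in NAPI context, but it still takes the netdev
 * queue lock: the same descriptor ring may also be used by the regular
 * transmit path, and the lock serializes tail updates between the two
 * producers.
 */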

/**
 * igc_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * @napi_budget: Used to determine if we are in netpoll
 *
 * returns true if ring is completely cleaned
 */
static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
{
	struct igc_adapter *adapter = q_vector->adapter;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	struct igc_ring *tx_ring = q_vector->tx.ring;
	unsigned int i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 xsk_frames = 0;

	if (test_bit(__IGC_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGC_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		switch (tx_buffer->type) {
		case IGC_TX_BUFFER_TYPE_XSK:
			xsk_frames++;
			break;
		case IGC_TX_BUFFER_TYPE_XDP:
			xdp_return_frame(tx_buffer->xdpf);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		case IGC_TX_BUFFER_TYPE_SKB:
			napi_consume_skb(tx_buffer->skb, napi_budget);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		default:
			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
			break;
		}

		/* unmap remaining buffers of this frame */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGC_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	igc_update_tx_stats(q_vector, total_packets, total_bytes);

	if (tx_ring->xsk_pool) {
		if (xsk_frames)
			xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
		if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
			xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
		igc_xdp_xmit_zc(tx_ring);
	}

	if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct igc_hw *hw = &adapter->hw;

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (tx_buffer->next_to_watch &&
		    time_after(jiffies, tx_buffer->time_stamp +
		    (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			netdev_err(tx_ring->netdev,
				   "Detected Tx Unit Hang\n"
				   "  Tx Queue             <%d>\n"
				   "  TDH                  <%x>\n"
				   "  TDT                  <%x>\n"
				   "  next_to_use          <%x>\n"
				   "  next_to_clean        <%x>\n"
				   "buffer_info[next_to_clean]\n"
				   "  time_stamp           <%lx>\n"
				   "  next_to_watch        <%p>\n"
				   "  jiffies              <%lx>\n"
				   "  desc.status          <%x>\n",
				   tx_ring->queue_index,
				   rd32(IGC_TDH(tx_ring->reg_idx)),
				   readl(tx_ring->tail),
				   tx_ring->next_to_use,
				   tx_ring->next_to_clean,
				   tx_buffer->time_stamp,
				   tx_buffer->next_to_watch,
				   jiffies,
				   tx_buffer->next_to_watch->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGC_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}

static int igc_find_mac_filter(struct igc_adapter *adapter,
			       enum igc_mac_filter_type type, const u8 *addr)
{
	struct igc_hw *hw = &adapter->hw;
	int max_entries = hw->mac.rar_entry_count;
	u32 ral, rah;
	int i;

	for (i = 0; i < max_entries; i++) {
		ral = rd32(IGC_RAL(i));
		rah = rd32(IGC_RAH(i));

		if (!(rah & IGC_RAH_AV))
			continue;
		if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
			continue;
		if ((rah & IGC_RAH_RAH_MASK) !=
		    le16_to_cpup((__le16 *)(addr + 4)))
			continue;
		if (ral != le32_to_cpup((__le32 *)(addr)))
			continue;

		return i;
	}

	return -1;
}
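
/* A receive-address slot is split across two registers, which is why the
 * lookup above compares them separately: RAL(i) holds the first four
 * bytes of the MAC address and the low 16 bits of RAH(i) hold the last
 * two. For example, 00:1b:21:aa:bb:cc is stored as RAL = 0xaa211b00 and
 * RAH[15:0] = 0xccbb.
 */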

static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	int max_entries = hw->mac.rar_entry_count;
	u32 rah;
	int i;

	for (i = 0; i < max_entries; i++) {
		rah = rd32(IGC_RAH(i));

		if (!(rah & IGC_RAH_AV))
			return i;
	}

	return -1;
}

/**
 * igc_add_mac_filter() - Add MAC address filter
 * @adapter: Pointer to adapter where the filter should be added
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *         matching the filter are enqueued onto 'queue'. Otherwise, queue
 *         assignment is disabled.
 *
 * Return: 0 in case of success, negative errno code otherwise.
 */
static int igc_add_mac_filter(struct igc_adapter *adapter,
			      enum igc_mac_filter_type type, const u8 *addr,
			      int queue)
{
	struct net_device *dev = adapter->netdev;
	int index;

	index = igc_find_mac_filter(adapter, type, addr);
	if (index >= 0)
		goto update_filter;

	index = igc_get_avail_mac_filter_slot(adapter);
	if (index < 0)
		return -ENOSPC;

	netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
		   index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
		   addr, queue);

update_filter:
	igc_set_mac_filter_hw(adapter, index, type, addr, queue);
	return 0;
}

/**
 * igc_del_mac_filter() - Delete MAC address filter
 * @adapter: Pointer to adapter where the filter should be deleted from
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 */
static void igc_del_mac_filter(struct igc_adapter *adapter,
			       enum igc_mac_filter_type type, const u8 *addr)
{
	struct net_device *dev = adapter->netdev;
	int index;

	index = igc_find_mac_filter(adapter, type, addr);
	if (index < 0)
		return;

	if (index == 0) {
		/* If this is the default filter, we don't actually delete it.
		 * We just reset to its default value i.e. disable queue
		 * assignment.
		 */
		netdev_dbg(dev, "Disable default MAC filter queue assignment");

		igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
	} else {
		netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
			   index,
			   type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
			   addr);

		igc_clear_mac_filter_hw(adapter, index);
	}
}

/**
 * igc_add_vlan_prio_filter() - Add VLAN priority filter
 * @adapter: Pointer to adapter where the filter should be added
 * @prio: VLAN priority value
 * @queue: Queue number which matching frames are assigned to
 *
 * Return: 0 in case of success, negative errno code otherwise.
 */
static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
				    int queue)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 vlanpqf;

	vlanpqf = rd32(IGC_VLANPQF);

	if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
		netdev_dbg(dev, "VLAN priority filter already in use\n");
		return -EEXIST;
	}

	vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
	vlanpqf |= IGC_VLANPQF_VALID(prio);

	wr32(IGC_VLANPQF, vlanpqf);

	netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
		   prio, queue);
	return 0;
}
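
/* VLANPQF packs one queue-select field plus one valid bit per VLAN
 * priority into a single register. As an illustration, steering priority
 * 3 to queue 2 sets the priority-3 QSEL field to 2 and the priority-3
 * valid bit while leaving the entries for the other priorities intact.
 */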

/**
 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
 * @adapter: Pointer to adapter where the filter should be deleted from
 * @prio: VLAN priority value
 */
static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
{
	struct igc_hw *hw = &adapter->hw;
	u32 vlanpqf;

	vlanpqf = rd32(IGC_VLANPQF);

	vlanpqf &= ~IGC_VLANPQF_VALID(prio);
	vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);

	wr32(IGC_VLANPQF, vlanpqf);

	netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
		   prio);
}

static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < MAX_ETYPE_FILTER; i++) {
		u32 etqf = rd32(IGC_ETQF(i));

		if (!(etqf & IGC_ETQF_FILTER_ENABLE))
			return i;
	}

	return -1;
}

/**
 * igc_add_etype_filter() - Add ethertype filter
 * @adapter: Pointer to adapter where the filter should be added
 * @etype: Ethertype value
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *         matching the filter are enqueued onto 'queue'. Otherwise, queue
 *         assignment is disabled.
 *
 * Return: 0 in case of success, negative errno code otherwise.
 */
static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
				int queue)
{
	struct igc_hw *hw = &adapter->hw;
	int index;
	u32 etqf;

	index = igc_get_avail_etype_filter_slot(adapter);
	if (index < 0)
		return -ENOSPC;

	etqf = rd32(IGC_ETQF(index));

	etqf &= ~IGC_ETQF_ETYPE_MASK;
	etqf |= etype;

	if (queue >= 0) {
		etqf &= ~IGC_ETQF_QUEUE_MASK;
		etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
		etqf |= IGC_ETQF_QUEUE_ENABLE;
	}

	etqf |= IGC_ETQF_FILTER_ENABLE;

	wr32(IGC_ETQF(index), etqf);

	netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
		   etype, queue);
	return 0;
}
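
/* Example: igc_add_etype_filter(adapter, ETH_P_1588, 2) programs a free
 * ETQF register to match PTP-over-Ethernet frames (ethertype 0x88f7) and
 * queue them to Rx ring 2. A negative queue matches the ethertype without
 * overriding the queue assignment.
 */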

static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
{
	struct igc_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < MAX_ETYPE_FILTER; i++) {
		u32 etqf = rd32(IGC_ETQF(i));

		if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
			return i;
	}

	return -1;
}

/**
 * igc_del_etype_filter() - Delete ethertype filter
 * @adapter: Pointer to adapter where the filter should be deleted from
 * @etype: Ethertype value
 */
static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
{
	struct igc_hw *hw = &adapter->hw;
	int index;

	index = igc_find_etype_filter(adapter, etype);
	if (index < 0)
		return;

	wr32(IGC_ETQF(index), 0);

	netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
		   etype);
}

static int igc_enable_nfc_rule(struct igc_adapter *adapter,
			       const struct igc_nfc_rule *rule)
{
	int err;

	if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
		err = igc_add_etype_filter(adapter, rule->filter.etype,
					   rule->action);
		if (err)
			return err;
	}

	if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
		err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
					 rule->filter.src_addr, rule->action);
		if (err)
			return err;
	}

	if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
		err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
					 rule->filter.dst_addr, rule->action);
		if (err)
			return err;
	}

	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
		int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
			   VLAN_PRIO_SHIFT;

		err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
		if (err)
			return err;
	}

	return 0;
}

static void igc_disable_nfc_rule(struct igc_adapter *adapter,
				 const struct igc_nfc_rule *rule)
{
	if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
		igc_del_etype_filter(adapter, rule->filter.etype);

	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
		int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
			   VLAN_PRIO_SHIFT;

		igc_del_vlan_prio_filter(adapter, prio);
	}

	if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
		igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
				   rule->filter.src_addr);

	if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
		igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
				   rule->filter.dst_addr);
}

/**
 * igc_get_nfc_rule() - Get NFC rule
 * @adapter: Pointer to adapter
 * @location: Rule location
 *
 * Context: Expects adapter->nfc_rule_lock to be held by caller.
 *
 * Return: Pointer to NFC rule at @location. If not found, NULL.
 */
struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
				      u32 location)
{
	struct igc_nfc_rule *rule;

	list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
		if (rule->location == location)
			return rule;
		if (rule->location > location)
			break;
	}

	return NULL;
}

/**
 * igc_del_nfc_rule() - Delete NFC rule
 * @adapter: Pointer to adapter
 * @rule: Pointer to rule to be deleted
 *
 * Disable NFC rule in hardware and delete it from adapter.
 *
 * Context: Expects adapter->nfc_rule_lock to be held by caller.
 */
void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
{
	igc_disable_nfc_rule(adapter, rule);

	list_del(&rule->list);
	adapter->nfc_rule_count--;

	kfree(rule);
}

static void igc_flush_nfc_rules(struct igc_adapter *adapter)
{
	struct igc_nfc_rule *rule, *tmp;

	mutex_lock(&adapter->nfc_rule_lock);

	list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
		igc_del_nfc_rule(adapter, rule);

	mutex_unlock(&adapter->nfc_rule_lock);
}

/**
 * igc_add_nfc_rule() - Add NFC rule
 * @adapter: Pointer to adapter
 * @rule: Pointer to rule to be added
 *
 * Enable NFC rule in hardware and add it to adapter.
 *
 * Context: Expects adapter->nfc_rule_lock to be held by caller.
 *
 * Return: 0 on success, negative errno on failure.
 */
int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
{
	struct igc_nfc_rule *pred, *cur;
	int err;

	err = igc_enable_nfc_rule(adapter, rule);
	if (err)
		return err;

	pred = NULL;
	list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
		if (cur->location >= rule->location)
			break;
		pred = cur;
	}

	list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
	adapter->nfc_rule_count++;
	return 0;
}
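
/* The rule list is kept sorted by location: the loop above stops at the
 * first rule whose location is >= the new rule's, and list_add() links
 * the new rule right after its predecessor. igc_get_nfc_rule() relies on
 * this ordering to stop searching early once it has passed the requested
 * location.
 */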

static void igc_restore_nfc_rules(struct igc_adapter *adapter)
{
	struct igc_nfc_rule *rule;

	mutex_lock(&adapter->nfc_rule_lock);

	list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
		igc_enable_nfc_rule(adapter, rule);

	mutex_unlock(&adapter->nfc_rule_lock);
}

static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}

static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
	return 0;
}

/**
 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and VLAN filtering.
 */
static void igc_set_rx_mode(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	if (netdev->flags & IFF_PROMISC) {
		rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= IGC_RCTL_MPE;
		} else {
			/* Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igc_write_mc_addr_list(netdev);
			if (count < 0)
				rctl |= IGC_RCTL_MPE;
		}
	}

	/* Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
	if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
		rctl |= IGC_RCTL_UPE;

	/* update state of unicast and multicast */
	rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
	wr32(IGC_RCTL, rctl);

#if (PAGE_SIZE < 8192)
	if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
		rlpml = IGC_MAX_FRAME_BUILD_SKB;
#endif
	wr32(IGC_RLPML, rlpml);
}

/**
 * igc_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 */
static void igc_configure(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i = 0;

	igc_get_hw_control(adapter);
	igc_set_rx_mode(netdev);

	igc_restore_vlan(adapter);

	igc_setup_tctl(adapter);
	igc_setup_mrqc(adapter);
	igc_setup_rctl(adapter);

	igc_set_default_mac_filter(adapter);
	igc_restore_nfc_rules(adapter);

	igc_configure_tx(adapter);
	igc_configure_rx(adapter);

	igc_rx_fifo_flush_base(&adapter->hw);

	/* call igc_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igc_ring *ring = adapter->rx_ring[i];

		if (ring->xsk_pool)
			igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
		else
			igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
	}
}

/**
 * igc_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be a multiple of 8
 *
 * The IVAR table consists of 2 columns, each containing a cause
 * allocation for an Rx and Tx ring, and a variable number of rows
 * depending on the number of queues supported.
 */
static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(IGC_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | IGC_IVAR_VALID) << offset;

	array_wr32(IGC_IVAR0, index, ivar);
}

static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
{
	struct igc_adapter *adapter = q_vector->adapter;
	struct igc_hw *hw = &adapter->hw;
	int rx_queue = IGC_N0_QUEUE;
	int tx_queue = IGC_N0_QUEUE;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case igc_i225:
		if (rx_queue > IGC_N0_QUEUE)
			igc_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGC_N0_QUEUE)
			igc_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	default:
		WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}
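
/* Worked example of the IVAR mapping used above: every 32-bit IVAR
 * register holds four 8-bit entries (Rx even queue, Tx even queue,
 * Rx odd queue, Tx odd queue). For rx_queue = 3 this yields row
 * 3 >> 1 = 1 and bit offset (3 & 0x1) << 4 = 16, i.e. byte 2 of IVAR1;
 * the matching tx_queue = 3 lands at offset 16 + 8 = 24, byte 3.
 */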

/**
 * igc_configure_msix - Configure MSI-X hardware
 * @adapter: Pointer to adapter structure
 *
 * igc_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 */
static void igc_configure_msix(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	int i, vector = 0;
	u32 tmp;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case igc_i225:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug.
		 */
		wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
		     IGC_GPIE_PBA | IGC_GPIE_EIAME |
		     IGC_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = BIT(vector);
		tmp = (vector++ | IGC_IVAR_VALID) << 8;

		wr32(IGC_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	}

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igc_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */
static void igc_irq_enable(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
		u32 regval = rd32(IGC_EIAC);

		wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(IGC_EIAM);
		wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
		wr32(IGC_EIMS, adapter->eims_enable_mask);
		wr32(IGC_IMS, ims);
	} else {
		wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
		wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
	}
}

/**
 * igc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */
static void igc_irq_disable(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(IGC_EIAM);

		wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(IGC_EIMC, adapter->eims_enable_mask);
		regval = rd32(IGC_EIAC);
		wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(IGC_IAM, 0);
	wr32(IGC_IMC, ~0);
	wrfl();

	if (adapter->msix_entries) {
		int vector = 0, i;

		synchronize_irq(adapter->msix_entries[vector++].vector);

		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[vector++].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
			      const u32 max_rss_queues)
{
	/* Determine if we need to pair queues. */
	/* If rss_queues > half of max_rss_queues, pair the queues in
	 * order to conserve interrupts due to limited supply.
	 */
	if (adapter->rss_queues > (max_rss_queues / 2))
		adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
	else
		adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
}
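
/* Example: with max_rss_queues = 4, requesting 3 or 4 RSS queues enables
 * IGC_FLAG_QUEUE_PAIRS, so each interrupt vector services one Tx/Rx pair
 * and roughly half as many MSI-X vectors are consumed; with 1 or 2 queues
 * the Tx and Rx rings stay on separate vectors.
 */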

unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
{
	return IGC_MAX_RX_QUEUES;
}

static void igc_init_queue_configuration(struct igc_adapter *adapter)
{
	u32 max_rss_queues;

	max_rss_queues = igc_get_max_rss_queues(adapter);
	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());

	igc_set_flag_queue_pairs(adapter, max_rss_queues);
}

/**
 * igc_reset_q_vector - Reset config for interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be reset
 *
 * If NAPI is enabled it will delete any references to the
 * NAPI struct. This is preparation for igc_free_q_vector.
 */
static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
{
	struct igc_q_vector *q_vector = adapter->q_vector[v_idx];

	/* if we're coming from igc_set_interrupt_capability, the vectors are
	 * not yet allocated
	 */
	if (!q_vector)
		return;

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	netif_napi_del(&q_vector->napi);
}

/**
 * igc_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.
 */
static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
{
	struct igc_q_vector *q_vector = adapter->q_vector[v_idx];

	adapter->q_vector[v_idx] = NULL;

	/* igc_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	if (q_vector)
		kfree_rcu(q_vector, rcu);
}

/**
 * igc_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 */
static void igc_free_q_vectors(struct igc_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--) {
		igc_reset_q_vector(adapter, v_idx);
		igc_free_q_vector(adapter, v_idx);
	}
}

/**
 * igc_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: pointer to q_vector
 * @ring_container: ring info to update the itr for
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * NOTE: These calculations are only valid when operating in a single-
 * queue environment.
 */
static void igc_update_itr(struct igc_q_vector *q_vector,
			   struct igc_ring_container *ring_container)
{
	unsigned int packets = ring_container->total_packets;
	unsigned int bytes = ring_container->total_bytes;
	u8 itrval = ring_container->itr;

	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

	switch (itrval) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes / packets > 8000)
			itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes / packets > 8000)
				itrval = bulk_latency;
			else if ((packets < 10) || ((bytes / packets) > 1200))
				itrval = bulk_latency;
			else if ((packets > 35))
				itrval = lowest_latency;
		} else if (bytes / packets > 2000) {
			itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}

	/* clear work counters, since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itrval;
}
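
/* Worked example: in the low_latency state, an interrupt window that saw
 * 4 packets totalling 36000 bytes gives bytes/packets = 9000 > 8000, so
 * the ring is promoted to bulk_latency; a window of 2 packets and 400
 * bytes (<= 2 packets, < 512 bytes) drops it back to lowest_latency.
 */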

static void igc_set_itr(struct igc_q_vector *q_vector)
{
	struct igc_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	switch (adapter->link_speed) {
	case SPEED_10:
	case SPEED_100:
		current_itr = 0;
		new_itr = IGC_4K_ITR;
		goto set_itr_now;
	default:
		break;
	}

	igc_update_itr(q_vector, &q_vector->tx);
	igc_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	    (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			  (new_itr + (q_vector->itr_val >> 2)),
			  new_itr) : new_itr;

		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the ISR based on the timer value.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGC_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}

	while (v_idx--)
		igc_reset_q_vector(adapter, v_idx);
}

/**
 * igc_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: Pointer to adapter structure
 * @msix: boolean value for MSI-X capability
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 */
static void igc_set_interrupt_capability(struct igc_adapter *adapter,
					 bool msix)
{
	int numvecs, i;
	int err;

	if (!msix)
		goto msi_only;
	adapter->flags |= IGC_FLAG_HAS_MSIX;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;

	adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every Rx queue */
	numvecs = adapter->num_rx_queues;

	/* if Tx handler is separate add 1 for every Tx queue */
	if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;

	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);

	if (!adapter->msix_entries)
		return;

	/* populate entry values */
	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	if (err > 0)
		return;

	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;

	igc_reset_interrupt_capability(adapter);

msi_only:
	adapter->flags &= ~IGC_FLAG_HAS_MSIX;

	adapter->rss_queues = 1;
	adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGC_FLAG_HAS_MSI;
}

/**
 * igc_update_ring_itr - update the dynamic ITR value based on packet size
 * @q_vector: pointer to q_vector
 *
 * Stores a new ITR value based strictly on packet size.  This
 * algorithm is less sophisticated than that used in igc_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings.  The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 */
static void igc_update_ring_itr(struct igc_q_vector *q_vector)
{
	struct igc_adapter *adapter = q_vector->adapter;
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec.
	 */
	switch (adapter->link_speed) {
	case SPEED_10:
	case SPEED_100:
		new_val = IGC_4K_ITR;
		goto set_itr_val;
	default:
		break;
	}

	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if (avg_wire_size > 300 && avg_wire_size < 1200)
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (new_val < IGC_20K_ITR &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	    (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGC_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}
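
/* Worked example: a stream of 1500-byte frames gives avg_wire_size
 * 1500 + 24 = 1524, which is outside the 300-1200 mid-size window, so
 * new_val = 1524 / 2 = 762. A 600-byte average would instead take the
 * divide-by-three boost: (600 + 24) / 3 = 208.
 */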

static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
{
	struct igc_adapter *adapter = q_vector->adapter;
	struct igc_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if (adapter->num_q_vectors == 1)
			igc_set_itr(q_vector);
		else
			igc_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGC_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(IGC_EIMS, q_vector->eims_value);
		else
			igc_irq_enable(adapter);
	}
}

static void igc_add_ring(struct igc_ring *ring,
			 struct igc_ring_container *head)
{
	head->ring = ring;
	head->count++;
}

/**
 * igc_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 */
static void igc_cache_ring_register(struct igc_adapter *adapter)
{
	int i = 0, j = 0;

	switch (adapter->hw.mac.type) {
	case igc_i225:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = j;
		break;
	}
}

/**
 * igc_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 */
static int igc_poll(struct napi_struct *napi, int budget)
{
	struct igc_q_vector *q_vector = container_of(napi,
						     struct igc_q_vector,
						     napi);
	struct igc_ring *rx_ring = q_vector->rx.ring;
	bool clean_complete = true;
	int work_done = 0;

	if (q_vector->tx.ring)
		clean_complete = igc_clean_tx_irq(q_vector, budget);

	if (rx_ring) {
		int cleaned = rx_ring->xsk_pool ?
			      igc_clean_rx_irq_zc(q_vector, budget) :
			      igc_clean_rx_irq(q_vector, budget);

		work_done += cleaned;
		if (cleaned >= budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		igc_ring_irq_enable(q_vector);

	return min(work_done, budget - 1);
}
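
/* Returning min(work_done, budget - 1) above is deliberate: NAPI treats a
 * return value equal to the full budget as "not done", so a poll that has
 * already called napi_complete_done() must report strictly less than
 * budget.
 */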

/**
 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 */
static int igc_alloc_q_vector(struct igc_adapter *adapter,
			      unsigned int v_count, unsigned int v_idx,
			      unsigned int txr_count, unsigned int txr_idx,
			      unsigned int rxr_count, unsigned int rxr_idx)
{
	struct igc_q_vector *q_vector;
	struct igc_ring *ring;
	int ring_count;

	/* igc only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	ring_count = txr_count + rxr_count;

	/* allocate q_vector and rings */
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector)
		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
				   GFP_KERNEL);
	else
		memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       igc_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration */
	q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
	q_vector->itr_val = IGC_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (rxr_count) {
		/* rx or rx/tx vector */
		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
			q_vector->itr_val = adapter->rx_itr_setting;
	} else {
		/* tx only vector */
		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
			q_vector->itr_val = adapter->tx_itr_setting;
	}

	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igc_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igc_add_ring(ring, &q_vector->rx);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}

/**
 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 */
static int igc_alloc_q_vectors(struct igc_adapter *adapter)
{
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int q_vectors = adapter->num_q_vectors;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
						 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
					 tqpv, txr_idx, rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igc_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
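
/* Example distributions: with queue pairs enabled, 4 Rx + 4 Tx queues and
 * 4 q_vectors skip the first loop and the second one hands every vector
 * one Rx and one Tx ring (rqpv = tqpv = 1). Without pairing there are 8
 * q_vectors, so the first loop dedicates 4 of them to Rx rings and the
 * remainder pick up one Tx ring each.
 */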

/**
 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 * @adapter: Pointer to adapter structure
 * @msix: boolean for MSI-X capability
 *
 * This function initializes the interrupts and allocates all of the queues.
 */
static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
{
	struct net_device *dev = adapter->netdev;
	int err = 0;

	igc_set_interrupt_capability(adapter, msix);

	err = igc_alloc_q_vectors(adapter);
	if (err) {
		netdev_err(dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	igc_cache_ring_register(adapter);

	return 0;

err_alloc_q_vectors:
	igc_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igc_sw_init - Initialize general software structures (struct igc_adapter)
 * @adapter: board private structure to initialize
 *
 * igc_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int igc_sw_init(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_r