1
2
3
4
5
6
7
8
9
10
11#include <linux/pci.h>
12#include <linux/ethtool.h>
13#include <linux/stddef.h>
14#include <linux/etherdevice.h>
15#include <linux/log2.h>
16#include <linux/net_tstamp.h>
17#include <linux/linkmode.h>
18
19#include "otx2_common.h"
20#include "otx2_ptp.h"
21
22#define DRV_NAME "octeontx2-nicpf"
23#define DRV_VF_NAME "octeontx2-nicvf"
24
/* One entry of an ethtool stats string set: the name reported to
 * userspace and the u64 slot index of the value in the stats struct.
 */
struct otx2_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};
29
30
/* Build an otx2_stat entry for a u64 field of struct otx2_dev_stats:
 * the stringified field name plus its u64 slot offset in the struct.
 */
#define OTX2_DEV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
}
35
36
/* Bitmask of the CGX firmware link-mode bits this driver can translate
 * to ethtool link modes (see cgx_link_mode[] in otx2_get_link_mode_info).
 */
#define OTX2_ETHTOOL_SUPPORTED_MODES 0x638CCBF

/* Selects which ksettings bitmap (supported vs advertised) the
 * otx2_get_*_info() helpers fill in.
 */
enum link_mode {
	OTX2_MODE_SUPPORTED,
	OTX2_MODE_ADVERTISED
};
43
/* Device (NIX block) stats exposed via ethtool -S */
static const struct otx2_stat otx2_dev_stats[] = {
	OTX2_DEV_STAT(rx_ucast_frames),
	OTX2_DEV_STAT(rx_bcast_frames),
	OTX2_DEV_STAT(rx_mcast_frames),

	OTX2_DEV_STAT(tx_ucast_frames),
	OTX2_DEV_STAT(tx_bcast_frames),
	OTX2_DEV_STAT(tx_mcast_frames),
};
53
54
/* Build an otx2_stat entry for an atomic_t field of struct
 * otx2_drv_stats; index is the atomic_t slot within the struct.
 */
#define OTX2_DRV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \
}
59
/* Driver-maintained error counters exposed via ethtool -S */
static const struct otx2_stat otx2_drv_stats[] = {
	OTX2_DRV_STAT(rx_fcs_errs),
	OTX2_DRV_STAT(rx_oversize_errs),
	OTX2_DRV_STAT(rx_undersize_errs),
	OTX2_DRV_STAT(rx_csum_errs),
	OTX2_DRV_STAT(rx_len_errs),
	OTX2_DRV_STAT(rx_other_errs),
};
68
/* Per-queue stats: index is the u64 slot within the queue stats struct */
static const struct otx2_stat otx2_queue_stats[] = {
	{ "bytes", 0 },
	{ "frames", 1 },
};

static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);

/* Fetches auxiliary link info from CGX firmware; defined further below */
static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);
79
80static void otx2_get_drvinfo(struct net_device *netdev,
81 struct ethtool_drvinfo *info)
82{
83 struct otx2_nic *pfvf = netdev_priv(netdev);
84
85 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
86 strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
87}
88
89static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
90{
91 int start_qidx = qset * pfvf->hw.rx_queues;
92 int qidx, stats;
93
94 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
95 for (stats = 0; stats < otx2_n_queue_stats; stats++) {
96 sprintf(*data, "rxq%d: %s", qidx + start_qidx,
97 otx2_queue_stats[stats].name);
98 *data += ETH_GSTRING_LEN;
99 }
100 }
101 for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
102 for (stats = 0; stats < otx2_n_queue_stats; stats++) {
103 sprintf(*data, "txq%d: %s", qidx + start_qidx,
104 otx2_queue_stats[stats].name);
105 *data += ETH_GSTRING_LEN;
106 }
107 }
108}
109
/* ethtool .get_strings: emit ETH_SS_STATS names in exactly the order
 * otx2_get_ethtool_stats() writes the values: device stats, driver
 * stats, per-queue stats, CGX RX/TX MAC stats, reset count, then the
 * two FEC counters.  Any change here must be mirrored there and in
 * otx2_get_sset_count().
 */
static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int stats;

	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < otx2_n_dev_stats; stats++) {
		memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < otx2_n_drv_stats; stats++) {
		memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	otx2_get_qset_strings(pfvf, &data, 0);

	/* CGX MAC stats are identified only by index */
	for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
		sprintf(data, "cgx_rxstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
		sprintf(data, "cgx_txstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}

	strcpy(data, "reset_count");
	data += ETH_GSTRING_LEN;
	sprintf(data, "Fec Corrected Errors: ");
	data += ETH_GSTRING_LEN;
	sprintf(data, "Fec Uncorrected Errors: ");
	data += ETH_GSTRING_LEN;
}
147
/* Copy per-queue stats (all RQs, then all SQs) into the ethtool data
 * buffer.  For each queue the slots listed in otx2_queue_stats are
 * emitted; a queue whose HW stats read fails reports zeroes so the
 * layout always matches otx2_get_strings().
 */
static void otx2_get_qset_stats(struct otx2_nic *pfvf,
				struct ethtool_stats *stats, u64 **data)
{
	int stat, qidx;

	if (!pfvf)
		return;
	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		if (!otx2_update_rq_stats(pfvf, qidx)) {
			/* Read failed: keep slot count consistent */
			for (stat = 0; stat < otx2_n_queue_stats; stat++)
				*((*data)++) = 0;
			continue;
		}
		/* Index into the stats struct viewed as an array of u64 */
		for (stat = 0; stat < otx2_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)
				[otx2_queue_stats[stat].index];
	}

	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		if (!otx2_update_sq_stats(pfvf, qidx)) {
			for (stat = 0; stat < otx2_n_queue_stats; stat++)
				*((*data)++) = 0;
			continue;
		}
		for (stat = 0; stat < otx2_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats)
				[otx2_queue_stats[stat].index];
	}
}
177
178static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
179{
180 struct msg_req *req;
181 int rc = -ENOMEM;
182
183 mutex_lock(&pfvf->mbox.lock);
184 req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
185 if (!req)
186 goto end;
187
188 if (!otx2_sync_mbox_msg(&pfvf->mbox))
189 rc = 0;
190end:
191 mutex_unlock(&pfvf->mbox.lock);
192 return rc;
193}
194
195
/* ethtool .get_ethtool_stats: fill the value array in the same order
 * as otx2_get_strings().  FEC counters default to the CGX-maintained
 * block counts and are replaced by PHY-reported counts when the PHY
 * exposes FEC stats.
 */
static void otx2_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	u64 fec_corr_blks, fec_uncorr_blks;
	struct cgx_fw_data *rsp;
	int stat;

	otx2_get_dev_stats(pfvf);
	for (stat = 0; stat < otx2_n_dev_stats; stat++)
		*(data++) = ((u64 *)&pfvf->hw.dev_stats)
				[otx2_dev_stats[stat].index];

	for (stat = 0; stat < otx2_n_drv_stats; stat++)
		*(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats)
						[otx2_drv_stats[stat].index]);

	otx2_get_qset_stats(pfvf, stats, &data);
	otx2_update_lmac_stats(pfvf);
	for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
		*(data++) = pfvf->hw.cgx_rx_stats[stat];
	for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
		*(data++) = pfvf->hw.cgx_tx_stats[stat];
	*(data++) = pfvf->reset_count;

	/* Default: CGX-side FEC block counters */
	fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
	fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;

	rsp = otx2_get_fwdata(pfvf);
	if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
	    !otx2_get_phy_fec_stats(pfvf)) {
		/* Re-fetch fwdata: the stats request above made firmware
		 * refresh the PHY FEC counters it carries.
		 */
		rsp = otx2_get_fwdata(pfvf);
		if (!IS_ERR(rsp)) {
			struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;

			/* Pick the counter pair matching the active FEC */
			if (pfvf->linfo.fec == OTX2_FEC_BASER) {
				fec_corr_blks = p->brfec_corr_blks;
				fec_uncorr_blks = p->brfec_uncorr_blks;
			} else {
				fec_corr_blks = p->rsfec_corr_cws;
				fec_uncorr_blks = p->rsfec_uncorr_cws;
			}
		}
	}

	*(data++) = fec_corr_blks;
	*(data++) = fec_uncorr_blks;
}
247
248static int otx2_get_sset_count(struct net_device *netdev, int sset)
249{
250 struct otx2_nic *pfvf = netdev_priv(netdev);
251 int qstats_count;
252
253 if (sset != ETH_SS_STATS)
254 return -EINVAL;
255
256 qstats_count = otx2_n_queue_stats *
257 (pfvf->hw.rx_queues + pfvf->hw.tx_queues);
258 otx2_update_lmac_fec_stats(pfvf);
259
260 return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
261 CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT
262 + 1;
263}
264
265
266static void otx2_get_channels(struct net_device *dev,
267 struct ethtool_channels *channel)
268{
269 struct otx2_nic *pfvf = netdev_priv(dev);
270
271 channel->max_rx = pfvf->hw.max_queues;
272 channel->max_tx = pfvf->hw.max_queues;
273
274 channel->rx_count = pfvf->hw.rx_queues;
275 channel->tx_count = pfvf->hw.tx_queues;
276}
277
278
279static int otx2_set_channels(struct net_device *dev,
280 struct ethtool_channels *channel)
281{
282 struct otx2_nic *pfvf = netdev_priv(dev);
283 bool if_up = netif_running(dev);
284 int err = 0;
285
286 if (!channel->rx_count || !channel->tx_count)
287 return -EINVAL;
288
289 if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) {
290 netdev_err(dev,
291 "Receive queues are in use by TC police action\n");
292 return -EINVAL;
293 }
294
295 if (if_up)
296 dev->netdev_ops->ndo_stop(dev);
297
298 err = otx2_set_real_num_queues(dev, channel->tx_count,
299 channel->rx_count);
300 if (err)
301 return err;
302
303 pfvf->hw.rx_queues = channel->rx_count;
304 pfvf->hw.tx_queues = channel->tx_count;
305 pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
306
307 if (if_up)
308 err = dev->netdev_ops->ndo_open(dev);
309
310 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
311 pfvf->hw.tx_queues, pfvf->hw.rx_queues);
312
313 return err;
314}
315
316static void otx2_get_pauseparam(struct net_device *netdev,
317 struct ethtool_pauseparam *pause)
318{
319 struct otx2_nic *pfvf = netdev_priv(netdev);
320 struct cgx_pause_frm_cfg *req, *rsp;
321
322 if (is_otx2_lbkvf(pfvf->pdev))
323 return;
324
325 req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
326 if (!req)
327 return;
328
329 if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
330 rsp = (struct cgx_pause_frm_cfg *)
331 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
332 pause->rx_pause = rsp->rx_pause;
333 pause->tx_pause = rsp->tx_pause;
334 }
335}
336
337static int otx2_set_pauseparam(struct net_device *netdev,
338 struct ethtool_pauseparam *pause)
339{
340 struct otx2_nic *pfvf = netdev_priv(netdev);
341
342 if (pause->autoneg)
343 return -EOPNOTSUPP;
344
345 if (is_otx2_lbkvf(pfvf->pdev))
346 return -EOPNOTSUPP;
347
348 if (pause->rx_pause)
349 pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
350 else
351 pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
352
353 if (pause->tx_pause)
354 pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
355 else
356 pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
357
358 return otx2_config_pause_frm(pfvf);
359}
360
361static void otx2_get_ringparam(struct net_device *netdev,
362 struct ethtool_ringparam *ring)
363{
364 struct otx2_nic *pfvf = netdev_priv(netdev);
365 struct otx2_qset *qs = &pfvf->qset;
366
367 ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX);
368 ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
369 ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
370 ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
371}
372
/* ethtool .set_ringparam: resize RQ/SQ rings.
 * Requested counts are rounded to the nearest supported queue size via
 * Q_SIZE()/Q_COUNT(); RX is kept above the silicon's CQ skid
 * requirement and TX is clamped to [4K, MAX].  The interface is
 * bounced (stop/open) to apply the new sizes.
 */
static int otx2_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	bool if_up = netif_running(netdev);
	struct otx2_qset *qs = &pfvf->qset;
	u32 rx_count, tx_count;

	/* Mini/jumbo rings are not supported by this hardware */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	rx_count = ring->rx_pending;

	/* On some silicons a skid of reserved CQEs is needed to avoid
	 * CQ overflow.
	 */
	if (rx_count < pfvf->hw.rq_skid)
		rx_count = pfvf->hw.rq_skid;
	rx_count = Q_COUNT(Q_SIZE(rx_count, 3));

	/* TX ring is clamped to the supported range before rounding */
	tx_count = clamp_t(u32, ring->tx_pending,
			   Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
	tx_count = Q_COUNT(Q_SIZE(tx_count, 3));

	/* Nothing to do if the effective sizes are unchanged */
	if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt)
		return 0;

	if (if_up)
		netdev->netdev_ops->ndo_stop(netdev);

	/* New ring sizes take effect on the next queue setup */
	qs->sqe_cnt = tx_count;
	qs->rqe_cnt = rx_count;

	if (if_up)
		return netdev->netdev_ops->ndo_open(netdev);

	return 0;
}
416
417static int otx2_get_coalesce(struct net_device *netdev,
418 struct ethtool_coalesce *cmd)
419{
420 struct otx2_nic *pfvf = netdev_priv(netdev);
421 struct otx2_hw *hw = &pfvf->hw;
422
423 cmd->rx_coalesce_usecs = hw->cq_time_wait;
424 cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
425 cmd->tx_coalesce_usecs = hw->cq_time_wait;
426 cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;
427
428 return 0;
429}
430
/* ethtool .set_coalesce: RX and TX share one CQ timer and one entry
 * count, so the RX and TX requests are reconciled into a single value:
 * if only one direction changed, adopt the changed one; if both
 * changed, take the smaller (more responsive) value.
 */
static int otx2_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_hw *hw = &pfvf->hw;
	int qidx;

	/* Zero frame counts would stall completions; ignore the request */
	if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
		return 0;

	/* Clamp the timer to the supported range */
	ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs,
					1, CQ_TIMER_THRESH_MAX);
	ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs,
					1, CQ_TIMER_THRESH_MAX);

	/* If the current value equals one request, the OTHER request is
	 * the one the user changed - adopt it; otherwise take the min.
	 */
	if (hw->cq_time_wait == ec->rx_coalesce_usecs)
		hw->cq_time_wait = ec->tx_coalesce_usecs;
	else if (hw->cq_time_wait == ec->tx_coalesce_usecs)
		hw->cq_time_wait = ec->rx_coalesce_usecs;
	else
		hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
					 ec->tx_coalesce_usecs);

	/* Clamp the entry-count threshold to what HW supports */
	ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
					      1, U16_MAX);
	ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
					      1, U16_MAX);

	/* Same single-value reconciliation as for the timer above */
	if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames)
		hw->cq_ecount_wait = ec->tx_max_coalesced_frames;
	else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames)
		hw->cq_ecount_wait = ec->rx_max_coalesced_frames;
	else
		hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
					   ec->tx_max_coalesced_frames);

	/* Apply immediately to all active completion interrupts */
	if (netif_running(netdev)) {
		for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
			otx2_config_irq_coalescing(pfvf, qidx);
	}

	return 0;
}
486
/* Translate the NIX flowkey configuration into ethtool RXH_* flags for
 * the flow type in nfc->flow_type.  Reports nothing if IP hashing is
 * disabled entirely.
 */
static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
				  struct ethtool_rxnfc *nfc)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;

	if (!(rss->flowkey_cfg &
	    (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
		return 0;

	/* IP src/dst are always hashed when IP hashing is on */
	nfc->data = RXH_IP_SRC | RXH_IP_DST;
	if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN)
		nfc->data |= RXH_VLAN;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
		if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP)
			nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	/* Plain IP (and AH/ESP without the combined type): IP fields only */
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
536
/* Translate an ethtool ETHTOOL_SRXFH request into the NIX flowkey
 * configuration and push it to hardware.  Per flow type only "no L4
 * hashing" or "both L4 port halves" are accepted; anything partial is
 * rejected, matching what the hardware can key on.
 */
static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
				  struct ethtool_rxnfc *nfc)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
	u32 rss_cfg = rss->flowkey_cfg;

	if (!rss->enable) {
		netdev_err(pfvf->netdev,
			   "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	/* Mimimum is IPv4 and IPv6, SIP/DIP */
	if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	if (nfc->data & RXH_VLAN)
		rss_cfg |=  NIX_FLOW_KEY_TYPE_VLAN;
	else
		rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* Different config for v4 and v6 is not supported.
		 * Both of them have to be either 4-tuple or 2-tuple.
		 */
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_TCP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
		switch (nfc->data & rxh_l4) {
		case 0:
			/* Disable SPI hashing; fall back to spreading on
			 * VLAN and the IPv4 protocol field instead.
			 */
			rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP |
				     NIX_FLOW_KEY_TYPE_AH);
			rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN |
				   NIX_FLOW_KEY_TYPE_IPV4_PROTO;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			/* HW cannot key on SPI together with VLAN */
			if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) {
				netdev_err(pfvf->netdev,
					   "RSS hash of ESP or AH with VLAN is not supported\n");
				return -EOPNOTSUPP;
			}

			rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH;
			/* SPI hashing replaces the protocol-field key
			 * installed by the "0" branch above.
			 */
			rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO;
			break;
		default:
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		/* Plain IP resets the key to SIP/DIP only */
		rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
		break;
	default:
		return -EINVAL;
	}

	rss->flowkey_cfg = rss_cfg;
	otx2_set_flowkey_cfg(pfvf);
	return 0;
}
644
/* ethtool .get_rxnfc for the PF: ring count, ntuple flow rules and RSS
 * hash options.
 */
static int otx2_get_rxnfc(struct net_device *dev,
			  struct ethtool_rxnfc *nfc, u32 *rules)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_GRXRINGS:
		nfc->data = pfvf->hw.rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		/* NOTE(review): pfvf->flow_cfg is dereferenced without a
		 * NULL check here - confirm the PF cannot register these
		 * ops with MCAM flow init failed/skipped.
		 */
		nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = otx2_get_flow(pfvf, nfc,  nfc->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = otx2_get_all_flows(pfvf, nfc, rules);
		break;
	case ETHTOOL_GRXFH:
		return otx2_get_rss_hash_opts(pfvf, nfc);
	default:
		break;
	}
	return ret;
}
673
674static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
675{
676 bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
677 struct otx2_nic *pfvf = netdev_priv(dev);
678 int ret = -EOPNOTSUPP;
679
680 switch (nfc->cmd) {
681 case ETHTOOL_SRXFH:
682 ret = otx2_set_rss_hash_opts(pfvf, nfc);
683 break;
684 case ETHTOOL_SRXCLSRLINS:
685 if (netif_running(dev) && ntuple)
686 ret = otx2_add_flow(pfvf, nfc);
687 break;
688 case ETHTOOL_SRXCLSRLDEL:
689 if (netif_running(dev) && ntuple)
690 ret = otx2_remove_flow(pfvf, nfc->fs.location);
691 break;
692 default:
693 break;
694 }
695
696 return ret;
697}
698
699static int otx2vf_get_rxnfc(struct net_device *dev,
700 struct ethtool_rxnfc *nfc, u32 *rules)
701{
702 struct otx2_nic *pfvf = netdev_priv(dev);
703 int ret = -EOPNOTSUPP;
704
705 switch (nfc->cmd) {
706 case ETHTOOL_GRXRINGS:
707 nfc->data = pfvf->hw.rx_queues;
708 ret = 0;
709 break;
710 case ETHTOOL_GRXFH:
711 return otx2_get_rss_hash_opts(pfvf, nfc);
712 default:
713 break;
714 }
715 return ret;
716}
717
718static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
719{
720 struct otx2_nic *pfvf = netdev_priv(dev);
721 int ret = -EOPNOTSUPP;
722
723 switch (nfc->cmd) {
724 case ETHTOOL_SRXFH:
725 ret = otx2_set_rss_hash_opts(pfvf, nfc);
726 break;
727 default:
728 break;
729 }
730
731 return ret;
732}
733
734static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
735{
736 struct otx2_nic *pfvf = netdev_priv(netdev);
737 struct otx2_rss_info *rss;
738
739 rss = &pfvf->hw.rss_info;
740
741 return sizeof(rss->key);
742}
743
/* ethtool .get_rxfh_indir_size: RSS indirection table entry count */
static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
{
	return  MAX_RSS_INDIR_TBL_SIZE;
}
748
749static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
750{
751 struct otx2_rss_info *rss = &pfvf->hw.rss_info;
752
753 otx2_rss_ctx_flow_del(pfvf, ctx_id);
754 kfree(rss->rss_ctx[ctx_id]);
755 rss->rss_ctx[ctx_id] = NULL;
756
757 return 0;
758}
759
760static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
761 u32 *rss_context)
762{
763 struct otx2_rss_info *rss = &pfvf->hw.rss_info;
764 u8 ctx;
765
766 for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
767 if (!rss->rss_ctx[ctx])
768 break;
769 }
770 if (ctx == MAX_RSS_GROUPS)
771 return -EINVAL;
772
773 rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
774 if (!rss->rss_ctx[ctx])
775 return -ENOMEM;
776 *rss_context = ctx;
777
778 return 0;
779}
780
781
/* ethtool .set_rxfh_context: update RSS key and/or the indirection
 * table of the given context; *rss_context may be ETH_RXFH_CONTEXT_ALLOC
 * to create a new context, and delete=true destroys one.  Only the
 * Toeplitz ("top") hash function is supported.
 */
static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
				 const u8 *hkey, const u8 hfunc,
				 u32 *rss_context, bool delete)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	struct otx2_rss_ctx *rss_ctx;
	struct otx2_rss_info *rss;
	int ret, idx;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
	    *rss_context >= MAX_RSS_GROUPS)
		return -EINVAL;

	rss = &pfvf->hw.rss_info;

	if (!rss->enable) {
		netdev_err(dev, "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	/* The hash key is global; apply it before any context handling */
	if (hkey) {
		memcpy(rss->key, hkey, sizeof(rss->key));
		otx2_set_rss_key(pfvf);
	}
	if (delete)
		return otx2_rss_ctx_delete(pfvf, *rss_context);

	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
		ret = otx2_rss_ctx_create(pfvf, rss_context);
		if (ret)
			return ret;
	}
	if (indir) {
		rss_ctx = rss->rss_ctx[*rss_context];
		for (idx = 0; idx < rss->rss_size; idx++)
			rss_ctx->ind_tbl[idx] = indir[idx];
	}
	/* Push the (possibly unchanged) table for this context to HW */
	otx2_set_rss_table(pfvf, *rss_context);

	return 0;
}
826
827static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
828 u8 *hkey, u8 *hfunc, u32 rss_context)
829{
830 struct otx2_nic *pfvf = netdev_priv(dev);
831 struct otx2_rss_ctx *rss_ctx;
832 struct otx2_rss_info *rss;
833 int idx, rx_queues;
834
835 rss = &pfvf->hw.rss_info;
836
837 if (hfunc)
838 *hfunc = ETH_RSS_HASH_TOP;
839
840 if (!indir)
841 return 0;
842
843 if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
844 rx_queues = pfvf->hw.rx_queues;
845 for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
846 indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
847 return 0;
848 }
849 if (rss_context >= MAX_RSS_GROUPS)
850 return -ENOENT;
851
852 rss_ctx = rss->rss_ctx[rss_context];
853 if (!rss_ctx)
854 return -ENOENT;
855
856 if (indir) {
857 for (idx = 0; idx < rss->rss_size; idx++)
858 indir[idx] = rss_ctx->ind_tbl[idx];
859 }
860 if (hkey)
861 memcpy(hkey, rss->key, sizeof(rss->key));
862
863 return 0;
864}
865
866
/* ethtool .get_rxfh: read the default RSS context */
static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
			 u8 *hkey, u8 *hfunc)
{
	return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
				     DEFAULT_RSS_CONTEXT_GROUP);
}
873
874
875static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
876 const u8 *hkey, const u8 hfunc)
877{
878
879 u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
880
881 return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
882}
883
884static u32 otx2_get_msglevel(struct net_device *netdev)
885{
886 struct otx2_nic *pfvf = netdev_priv(netdev);
887
888 return pfvf->msg_enable;
889}
890
891static void otx2_set_msglevel(struct net_device *netdev, u32 val)
892{
893 struct otx2_nic *pfvf = netdev_priv(netdev);
894
895 pfvf->msg_enable = val;
896}
897
898static u32 otx2_get_link(struct net_device *netdev)
899{
900 struct otx2_nic *pfvf = netdev_priv(netdev);
901
902
903 if (is_otx2_lbkvf(pfvf->pdev))
904 return 1;
905 return pfvf->linfo.link_up;
906}
907
908static int otx2_get_ts_info(struct net_device *netdev,
909 struct ethtool_ts_info *info)
910{
911 struct otx2_nic *pfvf = netdev_priv(netdev);
912
913 if (!pfvf->ptp)
914 return ethtool_op_get_ts_info(netdev, info);
915
916 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
917 SOF_TIMESTAMPING_RX_SOFTWARE |
918 SOF_TIMESTAMPING_SOFTWARE |
919 SOF_TIMESTAMPING_TX_HARDWARE |
920 SOF_TIMESTAMPING_RX_HARDWARE |
921 SOF_TIMESTAMPING_RAW_HARDWARE;
922
923 info->phc_index = otx2_ptp_clock_index(pfvf);
924
925 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
926
927 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
928 (1 << HWTSTAMP_FILTER_ALL);
929
930 return 0;
931}
932
933static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf)
934{
935 struct cgx_fw_data *rsp = NULL;
936 struct msg_req *req;
937 int err = 0;
938
939 mutex_lock(&pfvf->mbox.lock);
940 req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox);
941 if (!req) {
942 mutex_unlock(&pfvf->mbox.lock);
943 return ERR_PTR(-ENOMEM);
944 }
945
946 err = otx2_sync_mbox_msg(&pfvf->mbox);
947 if (!err) {
948 rsp = (struct cgx_fw_data *)
949 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
950 } else {
951 rsp = ERR_PTR(err);
952 }
953
954 mutex_unlock(&pfvf->mbox.lock);
955 return rsp;
956}
957
/* ethtool .get_fecparam: report active and firmware-supported FEC.
 * fec[] maps the OTX2_FEC_* value (OFF / BASER / RS / both) to the
 * corresponding ETHTOOL_FEC_* flag set.
 */
static int otx2_get_fecparam(struct net_device *netdev,
			     struct ethtool_fecparam *fecparam)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct cgx_fw_data *rsp;
	const int fec[] = {
		ETHTOOL_FEC_OFF,
		ETHTOOL_FEC_BASER,
		ETHTOOL_FEC_RS,
		ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS};
#define FEC_MAX_INDEX 4
	if (pfvf->linfo.fec < FEC_MAX_INDEX)
		fecparam->active_fec = fec[pfvf->linfo.fec];

	rsp = otx2_get_fwdata(pfvf);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) {
		if (!rsp->fwdata.supported_fec)
			/* 0 means firmware supports no FEC at all */
			fecparam->fec = ETHTOOL_FEC_NONE;
		else
			fecparam->fec = fec[rsp->fwdata.supported_fec];
	}
	return 0;
}
984
/* ethtool .set_fecparam: request a FEC mode change via the CGX mailbox.
 * ETHTOOL_FEC_AUTO is treated the same as OFF.  The firmware response
 * carries either the newly active mode or a negative error code.
 */
static int otx2_set_fecparam(struct net_device *netdev,
			     struct ethtool_fecparam *fecparam)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct mbox *mbox = &pfvf->mbox;
	struct fec_mode *req, *rsp;
	int err = 0, fec = 0;

	switch (fecparam->fec) {
	/* Firmware does not support AUTO mode; map it to OFF */
	case ETHTOOL_FEC_OFF:
	case ETHTOOL_FEC_AUTO:
		fec = OTX2_FEC_OFF;
		break;
	case ETHTOOL_FEC_RS:
		fec = OTX2_FEC_RS;
		break;
	case ETHTOOL_FEC_BASER:
		fec = OTX2_FEC_BASER;
		break;
	default:
		netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d",
			    fecparam->fec);
		return -EINVAL;
	}

	if (fec == pfvf->linfo.fec)
		return 0;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto end;
	}
	req->fec = fec;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto end;

	rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						   0, &req->hdr);
	/* Non-negative response is the mode now active in hardware */
	if (rsp->fec >= 0)
		pfvf->linfo.fec = rsp->fec;
	else
		err = rsp->fec;
end:
	mutex_unlock(&mbox->lock);
	return err;
}
1035
1036static void otx2_get_fec_info(u64 index, int req_mode,
1037 struct ethtool_link_ksettings *link_ksettings)
1038{
1039 __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, };
1040
1041 switch (index) {
1042 case OTX2_FEC_NONE:
1043 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
1044 otx2_fec_modes);
1045 break;
1046 case OTX2_FEC_BASER:
1047 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1048 otx2_fec_modes);
1049 break;
1050 case OTX2_FEC_RS:
1051 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1052 otx2_fec_modes);
1053 break;
1054 case OTX2_FEC_BASER | OTX2_FEC_RS:
1055 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1056 otx2_fec_modes);
1057 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1058 otx2_fec_modes);
1059 break;
1060 }
1061
1062
1063 if (req_mode == OTX2_MODE_ADVERTISED)
1064 linkmode_or(link_ksettings->link_modes.advertising,
1065 link_ksettings->link_modes.advertising,
1066 otx2_fec_modes);
1067 else
1068 linkmode_or(link_ksettings->link_modes.supported,
1069 link_ksettings->link_modes.supported,
1070 otx2_fec_modes);
1071}
1072
/* Translate a CGX firmware link-mode bitmap into ethtool link modes.
 * Bit 0 stands for SGMII and expands to all 10/100/1000BASE-T modes;
 * every other bit maps 1:1 via cgx_link_mode[] (zero entries are modes
 * ethtool has no counterpart for).  The result overwrites either the
 * advertising or the supported bitmap, selected by req_mode.
 */
static void otx2_get_link_mode_info(u64 link_mode_bmap,
				    bool req_mode,
				    struct ethtool_link_ksettings
				    *link_ksettings)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
	const int otx2_sgmii_features[6] = {
		ETHTOOL_LINK_MODE_10baseT_Half_BIT,
		ETHTOOL_LINK_MODE_10baseT_Full_BIT,
		ETHTOOL_LINK_MODE_100baseT_Half_BIT,
		ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
		ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	};
	/* CGX firmware bit position -> ethtool link mode; 0 = unmapped */
	const int cgx_link_mode[27] = {
		0, /* SGMII  Mode */
		ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		0,
		0,
		ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
		ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
		ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
		0,
		ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
	};
	u8 bit;

	/* Drop firmware bits this driver cannot represent */
	link_mode_bmap = link_mode_bmap & OTX2_ETHTOOL_SUPPORTED_MODES;

	for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
		/* SGMII mode is set */
		if (bit == 0)
			linkmode_set_bit_array(otx2_sgmii_features,
					       ARRAY_SIZE(otx2_sgmii_features),
					       otx2_link_modes);
		else
			linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
	}

	if (req_mode == OTX2_MODE_ADVERTISED)
		linkmode_copy(link_ksettings->link_modes.advertising,
			      otx2_link_modes);
	else
		linkmode_copy(link_ksettings->link_modes.supported,
			      otx2_link_modes);
}
1138
/* ethtool .get_link_ksettings: report speed/duplex/autoneg from the
 * cached link info and fill supported/advertised link + FEC modes from
 * firmware auxiliary data.
 */
static int otx2_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct cgx_fw_data *rsp = NULL;

	cmd->base.duplex  = pfvf->linfo.full_duplex;
	cmd->base.speed   = pfvf->linfo.speed;
	cmd->base.autoneg = pfvf->linfo.an;

	rsp = otx2_get_fwdata(pfvf);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	if (rsp->fwdata.supported_an)
		ethtool_link_ksettings_add_link_mode(cmd,
						     supported,
						     Autoneg);

	/* Link modes first (copy), then FEC bits are ORed on top */
	otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
				OTX2_MODE_ADVERTISED, cmd);
	otx2_get_fec_info(rsp->fwdata.advertised_fec,
			  OTX2_MODE_ADVERTISED, cmd);
	otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
				OTX2_MODE_SUPPORTED, cmd);
	otx2_get_fec_info(rsp->fwdata.supported_fec,
			  OTX2_MODE_SUPPORTED, cmd);
	return 0;
}
1168
1169static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
1170 u64 *mode)
1171{
1172 u32 bit_pos;
1173
1174
1175
1176
1177 bit_pos = find_first_bit(cmd->link_modes.advertising,
1178 __ETHTOOL_LINK_MODE_MASK_NBITS);
1179 if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
1180 *mode = bit_pos;
1181}
1182
/* ethtool .set_link_ksettings: validate the request against what the
 * link currently supports, then hand speed/duplex/autoneg/mode to CGX
 * firmware over the mailbox.
 */
static int otx2_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct ethtool_link_ksettings cur_ks;
	struct cgx_set_link_mode_req *req;
	struct mbox *mbox = &pf->mbox;
	int err = 0;

	memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));

	if (!ethtool_validate_speed(cmd->base.speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex))
		return -EINVAL;

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	otx2_get_link_ksettings(netdev, &cur_ks);

	/* Check requested modes against module/hardware capability */
	if (!bitmap_subset(cmd->link_modes.advertising,
			   cur_ks.link_modes.supported,
			   __ETHTOOL_LINK_MODE_MASK_NBITS))
		return -EINVAL;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto end;
	}

	req->args.speed = cmd->base.speed;
	/* firmware uses the inverse polarity of ethtool's DUPLEX_HALF(0)/
	 * DUPLEX_FULL(1), hence the XOR - NOTE(review): confirm against
	 * the CGX firmware interface spec.
	 */
	req->args.duplex = cmd->base.duplex ^ 0x1;
	req->args.an = cmd->base.autoneg;
	otx2_get_advertised_mode(cmd, &req->args.mode);

	err = otx2_sync_mbox_msg(&pf->mbox);
end:
	mutex_unlock(&mbox->lock);
	return err;
}
1230
/* ethtool operations for PF netdevs; VFs use a reduced set declared
 * further below.
 */
static const struct ethtool_ops otx2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2_get_drvinfo,
	.get_strings		= otx2_get_strings,
	.get_ethtool_stats	= otx2_get_ethtool_stats,
	.get_sset_count		= otx2_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_rxnfc		= otx2_get_rxnfc,
	.set_rxnfc		= otx2_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_rxfh_context	= otx2_get_rxfh_context,
	.set_rxfh_context	= otx2_set_rxfh_context,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
	.get_ts_info		= otx2_get_ts_info,
	.get_fecparam		= otx2_get_fecparam,
	.set_fecparam		= otx2_set_fecparam,
	.get_link_ksettings     = otx2_get_link_ksettings,
	.set_link_ksettings     = otx2_set_link_ksettings,
};
1263
/* Attach the PF ethtool operations to a netdev (called during probe) */
void otx2_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2_ethtool_ops;
}
1268
1269
1270static void otx2vf_get_drvinfo(struct net_device *netdev,
1271 struct ethtool_drvinfo *info)
1272{
1273 struct otx2_nic *vf = netdev_priv(netdev);
1274
1275 strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
1276 strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
1277}
1278
1279static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
1280{
1281 struct otx2_nic *vf = netdev_priv(netdev);
1282 int stats;
1283
1284 if (sset != ETH_SS_STATS)
1285 return;
1286
1287 for (stats = 0; stats < otx2_n_dev_stats; stats++) {
1288 memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
1289 data += ETH_GSTRING_LEN;
1290 }
1291
1292 for (stats = 0; stats < otx2_n_drv_stats; stats++) {
1293 memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
1294 data += ETH_GSTRING_LEN;
1295 }
1296
1297 otx2_get_qset_strings(vf, &data, 0);
1298
1299 strcpy(data, "reset_count");
1300 data += ETH_GSTRING_LEN;
1301}
1302
1303static void otx2vf_get_ethtool_stats(struct net_device *netdev,
1304 struct ethtool_stats *stats, u64 *data)
1305{
1306 struct otx2_nic *vf = netdev_priv(netdev);
1307 int stat;
1308
1309 otx2_get_dev_stats(vf);
1310 for (stat = 0; stat < otx2_n_dev_stats; stat++)
1311 *(data++) = ((u64 *)&vf->hw.dev_stats)
1312 [otx2_dev_stats[stat].index];
1313
1314 for (stat = 0; stat < otx2_n_drv_stats; stat++)
1315 *(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
1316 [otx2_drv_stats[stat].index]);
1317
1318 otx2_get_qset_stats(vf, stats, &data);
1319 *(data++) = vf->reset_count;
1320}
1321
1322static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
1323{
1324 struct otx2_nic *vf = netdev_priv(netdev);
1325 int qstats_count;
1326
1327 if (sset != ETH_SS_STATS)
1328 return -EINVAL;
1329
1330 qstats_count = otx2_n_queue_stats *
1331 (vf->hw.rx_queues + vf->hw.tx_queues);
1332
1333 return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
1334}
1335
1336static int otx2vf_get_link_ksettings(struct net_device *netdev,
1337 struct ethtool_link_ksettings *cmd)
1338{
1339 struct otx2_nic *pfvf = netdev_priv(netdev);
1340
1341 if (is_otx2_lbkvf(pfvf->pdev)) {
1342 cmd->base.duplex = DUPLEX_FULL;
1343 cmd->base.speed = SPEED_100000;
1344 } else {
1345 return otx2_get_link_ksettings(netdev, cmd);
1346 }
1347 return 0;
1348}
1349
/* ethtool operations for the VF netdev (no FEC/timestamp/link-set ops) */
static const struct ethtool_ops otx2vf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2vf_get_drvinfo,
	.get_strings		= otx2vf_get_strings,
	.get_ethtool_stats	= otx2vf_get_ethtool_stats,
	.get_sset_count		= otx2vf_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_rxnfc		= otx2vf_get_rxnfc,
	.set_rxnfc		= otx2vf_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_rxfh_context	= otx2_get_rxfh_context,
	.set_rxfh_context	= otx2_set_rxfh_context,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
	.get_link_ksettings	= otx2vf_get_link_ksettings,
};
1378
/* Attach the VF ethtool operations to a netdev; exported for the VF
 * driver module.
 */
void otx2vf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2vf_ethtool_ops;
}
EXPORT_SYMBOL(otx2vf_set_ethtool_ops);
1384