6#include <linux/firmware.h>
7#include <linux/mdio.h>
8
9#include "cxgb4.h"
10#include "t4_regs.h"
11#include "t4fw_api.h"
12#include "cxgb4_cudbg.h"
13#include "cxgb4_filter.h"
14#include "cxgb4_tc_flower.h"
15
18static u32 get_msglevel(struct net_device *dev)
19{
20 return netdev2adap(dev)->msg_enable;
21}
22
23static void set_msglevel(struct net_device *dev, u32 val)
24{
25 netdev2adap(dev)->msg_enable = val;
26}
27
28enum cxgb4_ethtool_tests {
29 CXGB4_ETHTOOL_LB_TEST,
30 CXGB4_ETHTOOL_MAX_TEST,
31};
32
33static const char cxgb4_selftest_strings[CXGB4_ETHTOOL_MAX_TEST][ETH_GSTRING_LEN] = {
34 "Loop back test (offline)",
35};
36
37static const char * const flash_region_strings[] = {
38 "All",
39 "Firmware",
40 "PHY Firmware",
41 "Boot",
42 "Boot CFG",
43};
44
45static const char stats_strings[][ETH_GSTRING_LEN] = {
46 "tx_octets_ok ",
47 "tx_frames_ok ",
48 "tx_broadcast_frames ",
49 "tx_multicast_frames ",
50 "tx_unicast_frames ",
51 "tx_error_frames ",
52
53 "tx_frames_64 ",
54 "tx_frames_65_to_127 ",
55 "tx_frames_128_to_255 ",
56 "tx_frames_256_to_511 ",
57 "tx_frames_512_to_1023 ",
58 "tx_frames_1024_to_1518 ",
59 "tx_frames_1519_to_max ",
60
61 "tx_frames_dropped ",
62 "tx_pause_frames ",
63 "tx_ppp0_frames ",
64 "tx_ppp1_frames ",
65 "tx_ppp2_frames ",
66 "tx_ppp3_frames ",
67 "tx_ppp4_frames ",
68 "tx_ppp5_frames ",
69 "tx_ppp6_frames ",
70 "tx_ppp7_frames ",
71
72 "rx_octets_ok ",
73 "rx_frames_ok ",
74 "rx_broadcast_frames ",
75 "rx_multicast_frames ",
76 "rx_unicast_frames ",
77
78 "rx_frames_too_long ",
79 "rx_jabber_errors ",
80 "rx_fcs_errors ",
81 "rx_length_errors ",
82 "rx_symbol_errors ",
83 "rx_runt_frames ",
84
85 "rx_frames_64 ",
86 "rx_frames_65_to_127 ",
87 "rx_frames_128_to_255 ",
88 "rx_frames_256_to_511 ",
89 "rx_frames_512_to_1023 ",
90 "rx_frames_1024_to_1518 ",
91 "rx_frames_1519_to_max ",
92
93 "rx_pause_frames ",
94 "rx_ppp0_frames ",
95 "rx_ppp1_frames ",
96 "rx_ppp2_frames ",
97 "rx_ppp3_frames ",
98 "rx_ppp4_frames ",
99 "rx_ppp5_frames ",
100 "rx_ppp6_frames ",
101 "rx_ppp7_frames ",
102
103 "rx_bg0_frames_dropped ",
104 "rx_bg1_frames_dropped ",
105 "rx_bg2_frames_dropped ",
106 "rx_bg3_frames_dropped ",
107 "rx_bg0_frames_trunc ",
108 "rx_bg1_frames_trunc ",
109 "rx_bg2_frames_trunc ",
110 "rx_bg3_frames_trunc ",
111
112 "tso ",
113 "uso ",
114 "tx_csum_offload ",
115 "rx_csum_good ",
116 "vlan_extractions ",
117 "vlan_insertions ",
118 "gro_packets ",
119 "gro_merged ",
120#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
121 "tx_tls_encrypted_packets",
122 "tx_tls_encrypted_bytes ",
123 "tx_tls_ctx ",
124 "tx_tls_ooo ",
125 "tx_tls_skip_no_sync_data",
126 "tx_tls_drop_no_sync_data",
127 "tx_tls_drop_bypass_req ",
128#endif
129};
130
131static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
132 "db_drop ",
133 "db_full ",
134 "db_empty ",
135 "write_coal_success ",
136 "write_coal_fail ",
137};
138
139static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
140 "-------Loopback----------- ",
141 "octets_ok ",
142 "frames_ok ",
143 "bcast_frames ",
144 "mcast_frames ",
145 "ucast_frames ",
146 "error_frames ",
147 "frames_64 ",
148 "frames_65_to_127 ",
149 "frames_128_to_255 ",
150 "frames_256_to_511 ",
151 "frames_512_to_1023 ",
152 "frames_1024_to_1518 ",
153 "frames_1519_to_max ",
154 "frames_dropped ",
155 "bg0_frames_dropped ",
156 "bg1_frames_dropped ",
157 "bg2_frames_dropped ",
158 "bg3_frames_dropped ",
159 "bg0_frames_trunc ",
160 "bg1_frames_trunc ",
161 "bg2_frames_trunc ",
162 "bg3_frames_trunc ",
163};
164
165static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = {
166 [PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr",
167};
168
169static int get_sset_count(struct net_device *dev, int sset)
170{
171 switch (sset) {
172 case ETH_SS_STATS:
173 return ARRAY_SIZE(stats_strings) +
174 ARRAY_SIZE(adapter_stats_strings) +
175 ARRAY_SIZE(loopback_stats_strings);
176 case ETH_SS_PRIV_FLAGS:
177 return ARRAY_SIZE(cxgb4_priv_flags_strings);
178 case ETH_SS_TEST:
179 return ARRAY_SIZE(cxgb4_selftest_strings);
180 default:
181 return -EOPNOTSUPP;
182 }
183}
184
185static int get_regs_len(struct net_device *dev)
186{
187 struct adapter *adap = netdev2adap(dev);
188
189 return t4_get_regs_len(adap);
190}
191
192static int get_eeprom_len(struct net_device *dev)
193{
194 return EEPROMSIZE;
195}
196
197static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
198{
199 struct adapter *adapter = netdev2adap(dev);
200 u32 exprom_vers;
201
202 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
203 strlcpy(info->bus_info, pci_name(adapter->pdev),
204 sizeof(info->bus_info));
205 info->regdump_len = get_regs_len(dev);
206
207 if (adapter->params.fw_vers)
208 snprintf(info->fw_version, sizeof(info->fw_version),
209 "%u.%u.%u.%u, TP %u.%u.%u.%u",
210 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
211 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
212 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
213 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
214 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
215 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
216 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
217 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
218
219 if (!t4_get_exprom_version(adapter, &exprom_vers))
220 snprintf(info->erom_version, sizeof(info->erom_version),
221 "%u.%u.%u.%u",
222 FW_HDR_FW_VER_MAJOR_G(exprom_vers),
223 FW_HDR_FW_VER_MINOR_G(exprom_vers),
224 FW_HDR_FW_VER_MICRO_G(exprom_vers),
225 FW_HDR_FW_VER_BUILD_G(exprom_vers));
226 info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings);
227}
228
229static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
230{
231 if (stringset == ETH_SS_STATS) {
232 memcpy(data, stats_strings, sizeof(stats_strings));
233 data += sizeof(stats_strings);
234 memcpy(data, adapter_stats_strings,
235 sizeof(adapter_stats_strings));
236 data += sizeof(adapter_stats_strings);
237 memcpy(data, loopback_stats_strings,
238 sizeof(loopback_stats_strings));
239 } else if (stringset == ETH_SS_PRIV_FLAGS) {
240 memcpy(data, cxgb4_priv_flags_strings,
241 sizeof(cxgb4_priv_flags_strings));
242 } else if (stringset == ETH_SS_TEST) {
243 memcpy(data, cxgb4_selftest_strings,
244 sizeof(cxgb4_selftest_strings));
245 }
246}
247
/* Per-port statistics accumulated from the port's queue sets.  The fields
 * must stay in the same order as the matching entries at the end of
 * stats_strings[] above.
 */
251struct queue_port_stats {
252 u64 tso;
253 u64 uso;
254 u64 tx_csum;
255 u64 rx_csum;
256 u64 vlan_ex;
257 u64 vlan_ins;
258 u64 gro_pkts;
259 u64 gro_merged;
260#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
261 u64 tx_tls_encrypted_packets;
262 u64 tx_tls_encrypted_bytes;
263 u64 tx_tls_ctx;
264 u64 tx_tls_ooo;
265 u64 tx_tls_skip_no_sync_data;
266 u64 tx_tls_drop_no_sync_data;
267 u64 tx_tls_drop_bypass_req;
268#endif
269};
270
271struct adapter_stats {
272 u64 db_drop;
273 u64 db_full;
274 u64 db_empty;
275 u64 wc_success;
276 u64 wc_fail;
277};
278
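/* Accumulate the SGE statistics kept in the port's Ethernet Rx/Tx queue sets
 * (and, when present, its EOSW Tx queues) into one queue_port_stats record,
 * and add the per-port kTLS counters when kTLS offload is built in.
 */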
279static void collect_sge_port_stats(const struct adapter *adap,
280 const struct port_info *p,
281 struct queue_port_stats *s)
282{
283 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
284 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
285#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
286 const struct ch_ktls_port_stats_debug *ktls_stats;
287#endif
288 struct sge_eohw_txq *eohw_tx;
289 unsigned int i;
290
291 memset(s, 0, sizeof(*s));
292 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
293 s->tso += tx->tso;
294 s->uso += tx->uso;
295 s->tx_csum += tx->tx_cso;
296 s->rx_csum += rx->stats.rx_cso;
297 s->vlan_ex += rx->stats.vlan_ex;
298 s->vlan_ins += tx->vlan_ins;
299 s->gro_pkts += rx->stats.lro_pkts;
300 s->gro_merged += rx->stats.lro_merged;
301 }
302
303 if (adap->sge.eohw_txq) {
304 eohw_tx = &adap->sge.eohw_txq[p->first_qset];
305 for (i = 0; i < p->nqsets; i++, eohw_tx++) {
306 s->tso += eohw_tx->tso;
307 s->uso += eohw_tx->uso;
308 s->tx_csum += eohw_tx->tx_cso;
309 s->vlan_ins += eohw_tx->vlan_ins;
310 }
311 }
312#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
313 ktls_stats = &adap->ch_ktls_stats.ktls_port[p->port_id];
314 s->tx_tls_encrypted_packets =
315 atomic64_read(&ktls_stats->ktls_tx_encrypted_packets);
316 s->tx_tls_encrypted_bytes =
317 atomic64_read(&ktls_stats->ktls_tx_encrypted_bytes);
318 s->tx_tls_ctx = atomic64_read(&ktls_stats->ktls_tx_ctx);
319 s->tx_tls_ooo = atomic64_read(&ktls_stats->ktls_tx_ooo);
320 s->tx_tls_skip_no_sync_data =
321 atomic64_read(&ktls_stats->ktls_tx_skip_no_sync_data);
322 s->tx_tls_drop_no_sync_data =
323 atomic64_read(&ktls_stats->ktls_tx_drop_no_sync_data);
324 s->tx_tls_drop_bypass_req =
325 atomic64_read(&ktls_stats->ktls_tx_drop_bypass_req);
326#endif
327}
328
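/* Gather adapter-wide statistics: the doorbell drop/full/empty counters and,
 * on T5 and later chips, the write-combining success/failure counts derived
 * from the SGE statistics registers.
 */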
329static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
330{
331 u64 val1, val2;
332
333 memset(s, 0, sizeof(*s));
334
335 s->db_drop = adap->db_stats.db_drop;
336 s->db_full = adap->db_stats.db_full;
337 s->db_empty = adap->db_stats.db_empty;
338
339 if (!is_t4(adap->params.chip)) {
340 int v;
341
342 v = t4_read_reg(adap, SGE_STAT_CFG_A);
343 if (STATSOURCE_T5_G(v) == 7) {
344 val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
345 val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
346 s->wc_success = val1 - val2;
347 s->wc_fail = val2;
348 }
349 }
350}
351
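/* ethtool -S handler.  The output buffer is filled in the same order as the
 * string tables above: port MAC stats, per-queue stats, adapter stats and
 * finally the loopback statistics for this port.
 */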
352static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
353 u64 *data)
354{
355 struct port_info *pi = netdev_priv(dev);
356 struct adapter *adapter = pi->adapter;
357 struct lb_port_stats s;
358 int i;
359 u64 *p0;
360
361 t4_get_port_stats_offset(adapter, pi->tx_chan,
362 (struct port_stats *)data,
363 &pi->stats_base);
364
365 data += sizeof(struct port_stats) / sizeof(u64);
366 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
367 data += sizeof(struct queue_port_stats) / sizeof(u64);
368 collect_adapter_stats(adapter, (struct adapter_stats *)data);
369 data += sizeof(struct adapter_stats) / sizeof(u64);
370
371 *data++ = (u64)pi->port_id;
372 memset(&s, 0, sizeof(s));
373 t4_get_lb_stats(adapter, pi->port_id, &s);
374
375 p0 = &s.octets;
376 for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
377 *data++ = (unsigned long long)*p0++;
378}
379
380static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
381 void *buf)
382{
383 struct adapter *adap = netdev2adap(dev);
384 size_t buf_size;
385
386 buf_size = t4_get_regs_len(adap);
387 regs->version = mk_adap_vers(adap);
388 t4_get_regs(adap, buf, buf_size);
389}
390
391static int restart_autoneg(struct net_device *dev)
392{
393 struct port_info *p = netdev_priv(dev);
394
395 if (!netif_running(dev))
396 return -EAGAIN;
397 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
398 return -EINVAL;
399 t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
400 return 0;
401}
402
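/* ethtool -p handler: pass the LED blink request to the firmware (0xffff
 * while identification is active, 0 to stop).
 */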
403static int identify_port(struct net_device *dev,
404 enum ethtool_phys_id_state state)
405{
406 unsigned int val;
407 struct adapter *adap = netdev2adap(dev);
408
409 if (state == ETHTOOL_ID_ACTIVE)
410 val = 0xffff;
411 else if (state == ETHTOOL_ID_INACTIVE)
412 val = 0;
413 else
414 return -EINVAL;
415
416 return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
417}
418
/* Translate the firmware's Port Type / Module Type pair into the PORT_*
 * connector type reported through ethtool.
 */
426static int from_fw_port_mod_type(enum fw_port_type port_type,
427 enum fw_port_module_type mod_type)
428{
429 if (port_type == FW_PORT_TYPE_BT_SGMII ||
430 port_type == FW_PORT_TYPE_BT_XFI ||
431 port_type == FW_PORT_TYPE_BT_XAUI) {
432 return PORT_TP;
433 } else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
434 port_type == FW_PORT_TYPE_FIBER_XAUI) {
435 return PORT_FIBRE;
436 } else if (port_type == FW_PORT_TYPE_SFP ||
437 port_type == FW_PORT_TYPE_QSFP_10G ||
438 port_type == FW_PORT_TYPE_QSA ||
439 port_type == FW_PORT_TYPE_QSFP ||
440 port_type == FW_PORT_TYPE_CR4_QSFP ||
441 port_type == FW_PORT_TYPE_CR_QSFP ||
442 port_type == FW_PORT_TYPE_CR2_QSFP ||
443 port_type == FW_PORT_TYPE_SFP28) {
444 if (mod_type == FW_PORT_MOD_TYPE_LR ||
445 mod_type == FW_PORT_MOD_TYPE_SR ||
446 mod_type == FW_PORT_MOD_TYPE_ER ||
447 mod_type == FW_PORT_MOD_TYPE_LRM)
448 return PORT_FIBRE;
449 else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
450 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
451 return PORT_DA;
452 else
453 return PORT_OTHER;
454 } else if (port_type == FW_PORT_TYPE_KR4_100G ||
455 port_type == FW_PORT_TYPE_KR_SFP28 ||
456 port_type == FW_PORT_TYPE_KR_XLAUI) {
457 return PORT_NONE;
458 }
459
460 return PORT_OTHER;
461}
462
/* Translate an ethtool speed in Mb/s into the matching 32-bit firmware Port
 * Capability speed bit, or 0 if the speed isn't supported.
 */
470static unsigned int speed_to_fw_caps(int speed)
471{
472 if (speed == 100)
473 return FW_PORT_CAP32_SPEED_100M;
474 if (speed == 1000)
475 return FW_PORT_CAP32_SPEED_1G;
476 if (speed == 10000)
477 return FW_PORT_CAP32_SPEED_10G;
478 if (speed == 25000)
479 return FW_PORT_CAP32_SPEED_25G;
480 if (speed == 40000)
481 return FW_PORT_CAP32_SPEED_40G;
482 if (speed == 50000)
483 return FW_PORT_CAP32_SPEED_50G;
484 if (speed == 100000)
485 return FW_PORT_CAP32_SPEED_100G;
486 if (speed == 200000)
487 return FW_PORT_CAP32_SPEED_200G;
488 if (speed == 400000)
489 return FW_PORT_CAP32_SPEED_400G;
490 return 0;
491}
492
/* Translate firmware Port Capabilities into an ethtool Link Mode Mask,
 * taking the physical port type into account.
 */
502static void fw_caps_to_lmm(enum fw_port_type port_type,
503 fw_port_cap32_t fw_caps,
504 unsigned long *link_mode_mask)
505{
506 #define SET_LMM(__lmm_name) \
507 do { \
508 __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
509 link_mode_mask); \
510 } while (0)
511
512 #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
513 do { \
514 if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
515 SET_LMM(__lmm_name); \
516 } while (0)
517
518 switch (port_type) {
519 case FW_PORT_TYPE_BT_SGMII:
520 case FW_PORT_TYPE_BT_XFI:
521 case FW_PORT_TYPE_BT_XAUI:
522 SET_LMM(TP);
523 FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
524 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
525 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
526 break;
527
528 case FW_PORT_TYPE_KX4:
529 case FW_PORT_TYPE_KX:
530 SET_LMM(Backplane);
531 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
532 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
533 break;
534
535 case FW_PORT_TYPE_KR:
536 SET_LMM(Backplane);
537 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
538 break;
539
540 case FW_PORT_TYPE_BP_AP:
541 SET_LMM(Backplane);
542 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
543 FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
544 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
545 break;
546
547 case FW_PORT_TYPE_BP4_AP:
548 SET_LMM(Backplane);
549 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
550 FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
551 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
552 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
553 break;
554
555 case FW_PORT_TYPE_FIBER_XFI:
556 case FW_PORT_TYPE_FIBER_XAUI:
557 case FW_PORT_TYPE_SFP:
558 case FW_PORT_TYPE_QSFP_10G:
559 case FW_PORT_TYPE_QSA:
560 SET_LMM(FIBRE);
561 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
562 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
563 break;
564
565 case FW_PORT_TYPE_BP40_BA:
566 case FW_PORT_TYPE_QSFP:
567 SET_LMM(FIBRE);
568 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
569 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
570 FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
571 break;
572
573 case FW_PORT_TYPE_CR_QSFP:
574 case FW_PORT_TYPE_SFP28:
575 SET_LMM(FIBRE);
576 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
577 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
578 FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
579 break;
580
581 case FW_PORT_TYPE_KR_SFP28:
582 SET_LMM(Backplane);
583 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
584 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
585 FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
586 break;
587
588 case FW_PORT_TYPE_KR_XLAUI:
589 SET_LMM(Backplane);
590 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
591 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
592 FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
593 break;
594
595 case FW_PORT_TYPE_CR2_QSFP:
596 SET_LMM(FIBRE);
597 FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full);
598 break;
599
600 case FW_PORT_TYPE_KR4_100G:
601 case FW_PORT_TYPE_CR4_QSFP:
602 SET_LMM(FIBRE);
603 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
604 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
605 FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
606 FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
607 FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
608 FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full);
609 break;
610
611 default:
612 break;
613 }
614
615 if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
616 FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
617 FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
618 } else {
619 SET_LMM(FEC_NONE);
620 }
621
622 FW_CAPS_TO_LMM(ANEG, Autoneg);
623 FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
624 FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
625
626 #undef FW_CAPS_TO_LMM
627 #undef SET_LMM
628}
629
/* Translate an ethtool Link Mode Mask into 32-bit firmware Port
 * Capabilities.
 */
638static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask)
639{
640 unsigned int fw_caps = 0;
641
642 #define LMM_TO_FW_CAPS(__lmm_name, __fw_name) \
643 do { \
644 if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
645 link_mode_mask)) \
646 fw_caps |= FW_PORT_CAP32_ ## __fw_name; \
647 } while (0)
648
649 LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M);
650 LMM_TO_FW_CAPS(1000baseT_Full, SPEED_1G);
651 LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G);
652 LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G);
653 LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G);
654 LMM_TO_FW_CAPS(50000baseCR2_Full, SPEED_50G);
655 LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G);
656
657 #undef LMM_TO_FW_CAPS
658
659 return fw_caps;
660}
661
662static int get_link_ksettings(struct net_device *dev,
663 struct ethtool_link_ksettings *link_ksettings)
664{
665 struct port_info *pi = netdev_priv(dev);
666 struct ethtool_link_settings *base = &link_ksettings->base;
667
	/* If the interface isn't up, ask the firmware for the current port
	 * state so the type, module and capabilities we report are fresh.
	 */
672 if (!netif_running(dev))
673 (void)t4_update_port_info(pi);
674
675 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
676 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
677 ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
678
679 base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
680
681 if (pi->mdio_addr >= 0) {
682 base->phy_address = pi->mdio_addr;
683 base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
684 ? ETH_MDIO_SUPPORTS_C22
685 : ETH_MDIO_SUPPORTS_C45);
686 } else {
687 base->phy_address = 255;
688 base->mdio_support = 0;
689 }
690
691 fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
692 link_ksettings->link_modes.supported);
693 fw_caps_to_lmm(pi->port_type,
694 t4_link_acaps(pi->adapter,
695 pi->lport,
696 &pi->link_cfg),
697 link_ksettings->link_modes.advertising);
698 fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
699 link_ksettings->link_modes.lp_advertising);
700
701 base->speed = (netif_carrier_ok(dev)
702 ? pi->link_cfg.speed
703 : SPEED_UNKNOWN);
704 base->duplex = DUPLEX_FULL;
705
706 base->autoneg = pi->link_cfg.autoneg;
707 if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
708 ethtool_link_ksettings_add_link_mode(link_ksettings,
709 supported, Autoneg);
710 if (pi->link_cfg.autoneg)
711 ethtool_link_ksettings_add_link_mode(link_ksettings,
712 advertising, Autoneg);
713
714 return 0;
715}
716
717static int set_link_ksettings(struct net_device *dev,
718 const struct ethtool_link_ksettings *link_ksettings)
719{
720 struct port_info *pi = netdev_priv(dev);
721 struct link_config *lc = &pi->link_cfg;
722 const struct ethtool_link_settings *base = &link_ksettings->base;
723 struct link_config old_lc;
724 unsigned int fw_caps;
725 int ret = 0;
726
727
728 if (base->duplex != DUPLEX_FULL)
729 return -EINVAL;
730
731 old_lc = *lc;
732 if (!(lc->pcaps & FW_PORT_CAP32_ANEG) ||
733 base->autoneg == AUTONEG_DISABLE) {
734 fw_caps = speed_to_fw_caps(base->speed);
735
736
737 if (!(lc->pcaps & fw_caps))
738 return -EINVAL;
739
740 lc->speed_caps = fw_caps;
741 lc->acaps = fw_caps;
742 } else {
743 fw_caps =
744 lmm_to_fw_caps(link_ksettings->link_modes.advertising);
745 if (!(lc->pcaps & fw_caps))
746 return -EINVAL;
747 lc->speed_caps = 0;
748 lc->acaps = fw_caps | FW_PORT_CAP32_ANEG;
749 }
750 lc->autoneg = base->autoneg;
751
	/* Hand the new configuration to the firmware and restore the cached
	 * link_config if it is rejected.
	 */
755 ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, pi->tx_chan, lc);
756 if (ret)
757 *lc = old_lc;
758
759 return ret;
760}
761
/* Translate firmware FEC Port Capabilities into ethtool FEC flags. */
763static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
764{
765 unsigned int eth_fec = 0;
766
767 if (fw_fec & FW_PORT_CAP32_FEC_RS)
768 eth_fec |= ETHTOOL_FEC_RS;
769 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
770 eth_fec |= ETHTOOL_FEC_BASER;
771
772
773 if (!eth_fec)
774 eth_fec = ETHTOOL_FEC_OFF;
775
776 return eth_fec;
777}
778
/* Translate Common Code FEC settings into ethtool FEC flags. */
780static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
781{
782 unsigned int eth_fec = 0;
783
784 if (cc_fec & FEC_AUTO)
785 eth_fec |= ETHTOOL_FEC_AUTO;
786 if (cc_fec & FEC_RS)
787 eth_fec |= ETHTOOL_FEC_RS;
788 if (cc_fec & FEC_BASER_RS)
789 eth_fec |= ETHTOOL_FEC_BASER;
790
791
792 if (!eth_fec)
793 eth_fec = ETHTOOL_FEC_OFF;
794
795 return eth_fec;
796}
797
/* Translate ethtool FEC flags into Common Code FEC settings. */
799static inline unsigned int eth_to_cc_fec(unsigned int eth_fec)
800{
801 unsigned int cc_fec = 0;
802
803 if (eth_fec & ETHTOOL_FEC_OFF)
804 return cc_fec;
805
806 if (eth_fec & ETHTOOL_FEC_AUTO)
807 cc_fec |= FEC_AUTO;
808 if (eth_fec & ETHTOOL_FEC_RS)
809 cc_fec |= FEC_RS;
810 if (eth_fec & ETHTOOL_FEC_BASER)
811 cc_fec |= FEC_BASER_RS;
812
813 return cc_fec;
814}
815
816static int get_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
817{
818 const struct port_info *pi = netdev_priv(dev);
819 const struct link_config *lc = &pi->link_cfg;
820
	/* Advertise the FEC modes allowed by the Physical Port Capabilities;
	 * "auto" is included whenever any FEC mode is available.
	 */
825 fec->fec = fwcap_to_eth_fec(lc->pcaps);
826 if (fec->fec != ETHTOOL_FEC_OFF)
827 fec->fec |= ETHTOOL_FEC_AUTO;
828
	/* Report the FEC mode currently in use on the link. */
832 fec->active_fec = cc_to_eth_fec(lc->fec);
833
834 return 0;
835}
836
837static int set_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
838{
839 struct port_info *pi = netdev_priv(dev);
840 struct link_config *lc = &pi->link_cfg;
841 struct link_config old_lc;
842 int ret;
843
	/* Save the old configuration so it can be restored if the firmware
	 * rejects the new FEC setting.
	 */
847 old_lc = *lc;
848
	/* Record the requested FEC mode and push it to the firmware. */
852 lc->requested_fec = eth_to_cc_fec(fec->fec);
853 ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox,
854 pi->tx_chan, lc);
855 if (ret)
856 *lc = old_lc;
857 return ret;
858}
859
860static void get_pauseparam(struct net_device *dev,
861 struct ethtool_pauseparam *epause)
862{
863 struct port_info *p = netdev_priv(dev);
864
865 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
866 epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
867 epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
868}
869
870static int set_pauseparam(struct net_device *dev,
871 struct ethtool_pauseparam *epause)
872{
873 struct port_info *p = netdev_priv(dev);
874 struct link_config *lc = &p->link_cfg;
875
876 if (epause->autoneg == AUTONEG_DISABLE)
877 lc->requested_fc = 0;
878 else if (lc->pcaps & FW_PORT_CAP32_ANEG)
879 lc->requested_fc = PAUSE_AUTONEG;
880 else
881 return -EINVAL;
882
883 if (epause->rx_pause)
884 lc->requested_fc |= PAUSE_RX;
885 if (epause->tx_pause)
886 lc->requested_fc |= PAUSE_TX;
887 if (netif_running(dev))
888 return t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan,
889 lc);
890 return 0;
891}
892
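/* Report the ring sizes of this port's first queue set.  The free-list size
 * is reported minus the 8 extra entries the driver adds internally (see
 * set_sge_param() below).
 */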
893static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
894{
895 const struct port_info *pi = netdev_priv(dev);
896 const struct sge *s = &pi->adapter->sge;
897
898 e->rx_max_pending = MAX_RX_BUFFERS;
899 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
900 e->rx_jumbo_max_pending = 0;
901 e->tx_max_pending = MAX_TXQ_ENTRIES;
902
903 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
904 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
905 e->rx_jumbo_pending = 0;
906 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
907}
908
909static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
910{
911 int i;
912 const struct port_info *pi = netdev_priv(dev);
913 struct adapter *adapter = pi->adapter;
914 struct sge *s = &adapter->sge;
915
916 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
917 e->tx_pending > MAX_TXQ_ENTRIES ||
918 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
919 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
920 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
921 return -EINVAL;
922
923 if (adapter->flags & CXGB4_FULL_INIT_DONE)
924 return -EBUSY;
925
926 for (i = 0; i < pi->nqsets; ++i) {
927 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
928 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
929 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
930 }
931 return 0;
932}
933
/* Set the Rx interrupt hold-off time (in microseconds) and packet-count
 * threshold for every Rx response queue owned by this port.
 */
942static int set_rx_intr_params(struct net_device *dev,
943 unsigned int us, unsigned int cnt)
944{
945 int i, err;
946 struct port_info *pi = netdev_priv(dev);
947 struct adapter *adap = pi->adapter;
948 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
949
950 for (i = 0; i < pi->nqsets; i++, q++) {
951 err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
952 if (err)
953 return err;
954 }
955 return 0;
956}
957
958static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
959{
960 int i;
961 struct port_info *pi = netdev_priv(dev);
962 struct adapter *adap = pi->adapter;
963 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
964
965 for (i = 0; i < pi->nqsets; i++, q++)
966 q->rspq.adaptive_rx = adaptive_rx;
967
968 return 0;
969}
970
971static int get_adaptive_rx_setting(struct net_device *dev)
972{
973 struct port_info *pi = netdev_priv(dev);
974 struct adapter *adap = pi->adapter;
975 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
976
977 return q->rspq.adaptive_rx;
978}
979
/* Return the global SGE Doorbell Queue Timer Tick in microseconds, or 0 if
 * the hardware has no Doorbell Queue Timer support.
 */
983static int get_dbqtimer_tick(struct net_device *dev)
984{
985 struct port_info *pi = netdev_priv(dev);
986 struct adapter *adap = pi->adapter;
987
988 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
989 return 0;
990
991 return adap->sge.dbqtimer_tick;
992}
993
/* Return the SGE Doorbell Queue Timer value used by this port's Ethernet Tx
 * queues (all of the port's Tx queues share the same timer index).
 */
997static int get_dbqtimer(struct net_device *dev)
998{
999 struct port_info *pi = netdev_priv(dev);
1000 struct adapter *adap = pi->adapter;
1001 struct sge_eth_txq *txq;
1002
1003 txq = &adap->sge.ethtxq[pi->first_qset];
1004
1005 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
1006 return 0;
1007
1008
1009 return adap->sge.dbqtimer_val[txq->dbqtimerix];
1010}
1011
/* Set the global SGE Doorbell Queue Timer Tick.  The derived timer values
 * are re-read from the firmware afterwards since they depend on the tick.
 */
1019static int set_dbqtimer_tick(struct net_device *dev, int usecs)
1020{
1021 struct port_info *pi = netdev_priv(dev);
1022 struct adapter *adap = pi->adapter;
1023 struct sge *s = &adap->sge;
1024 u32 param, val;
1025 int ret;
1026
1027 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
1028 return 0;
1029
1030
1031 if (s->dbqtimer_tick == usecs)
1032 return 0;
1033
1034
1035 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1036 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
1037 val = usecs;
	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
1039 if (ret)
1040 return ret;
1041 s->dbqtimer_tick = usecs;
1042
1043
1044 ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(s->dbqtimer_val),
1045 s->dbqtimer_val);
1046 return ret;
1047}
1048
/* Set the SGE Doorbell Queue Timer for this port's Ethernet Tx queues to the
 * supported value closest to the requested number of microseconds.
 */
1053static int set_dbqtimer(struct net_device *dev, int usecs)
1054{
1055 int qix, timerix, min_timerix, delta, min_delta;
1056 struct port_info *pi = netdev_priv(dev);
1057 struct adapter *adap = pi->adapter;
1058 struct sge *s = &adap->sge;
1059 struct sge_eth_txq *txq;
1060 u32 param, val;
1061 int ret;
1062
1063 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
1064 return 0;
1065
	/* Find the supported Doorbell Queue Timer value closest to the
	 * requested value.
	 */
1069 min_delta = INT_MAX;
1070 min_timerix = 0;
1071 for (timerix = 0; timerix < ARRAY_SIZE(s->dbqtimer_val); timerix++) {
1072 delta = s->dbqtimer_val[timerix] - usecs;
1073 if (delta < 0)
1074 delta = -delta;
1075 if (delta < min_delta) {
1076 min_delta = delta;
1077 min_timerix = timerix;
1078 }
1079 }
1080
	/* Nothing to do if the port already uses the chosen timer index;
	 * otherwise update every Tx queue, going through the firmware when
	 * the queues have already been created.
	 */
1085 txq = &s->ethtxq[pi->first_qset];
1086 if (txq->dbqtimerix == min_timerix)
1087 return 0;
1088
1089 for (qix = 0; qix < pi->nqsets; qix++, txq++) {
1090 if (adap->flags & CXGB4_FULL_INIT_DONE) {
1091 param =
1092 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1093 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) |
1094 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
1095 val = min_timerix;
1096 ret = t4_set_params(adap, adap->mbox, adap->pf, 0,
					    1, &param, &val);
1098 if (ret)
1099 return ret;
1100 }
1101 txq->dbqtimerix = min_timerix;
1102 }
1103 return 0;
1104}
1105
/* Set both the global Doorbell Queue Timer Tick and this device's Doorbell
 * Queue Timer value.  Changing the tick affects every port, so the other
 * ports' current timer values are captured first and re-applied afterwards.
 */
1116static int set_dbqtimer_tickval(struct net_device *dev,
1117 int tick_usecs, int timer_usecs)
1118{
1119 struct port_info *pi = netdev_priv(dev);
1120 struct adapter *adap = pi->adapter;
1121 int timer[MAX_NPORTS];
1122 unsigned int port;
1123 int ret;
1124
	/* Capture each port's timer value (using the new value for this port)
	 * before the tick change invalidates them.
	 */
1128 for_each_port(adap, port)
1129 if (port == pi->port_id)
1130 timer[port] = timer_usecs;
1131 else
1132 timer[port] = get_dbqtimer(adap->port[port]);
1133
1134
1135 ret = set_dbqtimer_tick(dev, tick_usecs);
1136 if (ret)
1137 return ret;
1138
1139
1140 for_each_port(adap, port) {
1141 ret = set_dbqtimer(adap->port[port], timer[port]);
1142 if (ret)
1143 return ret;
1144 }
1145
1146 return 0;
1147}
1148
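/* ethtool -C handler.  rx-usecs/rx-frames program the Rx response queue
 * interrupt hold-off, adaptive-rx toggles adaptive hold-off, tx-usecs-irq
 * sets the global SGE Doorbell Queue Timer tick and tx-usecs selects the
 * per-port Doorbell Queue Timer value.
 */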
1149static int set_coalesce(struct net_device *dev,
1150 struct ethtool_coalesce *coalesce)
1151{
1152 int ret;
1153
1154 set_adaptive_rx_setting(dev, coalesce->use_adaptive_rx_coalesce);
1155
1156 ret = set_rx_intr_params(dev, coalesce->rx_coalesce_usecs,
1157 coalesce->rx_max_coalesced_frames);
1158 if (ret)
1159 return ret;
1160
1161 return set_dbqtimer_tickval(dev,
1162 coalesce->tx_coalesce_usecs_irq,
1163 coalesce->tx_coalesce_usecs);
1164}
1165
1166static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1167{
1168 const struct port_info *pi = netdev_priv(dev);
1169 const struct adapter *adap = pi->adapter;
1170 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1171
1172 c->rx_coalesce_usecs = qtimer_val(adap, rq);
1173 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
1174 adap->sge.counter_val[rq->pktcnt_idx] : 0;
1175 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
1176 c->tx_coalesce_usecs_irq = get_dbqtimer_tick(dev);
1177 c->tx_coalesce_usecs = get_dbqtimer(dev);
1178 return 0;
1179}
1180
/* EEPROM accesses work on 32-bit words via the PCI VPD capability, after the
 * physical address has been translated into this function's virtual EEPROM
 * address space.
 */
1183static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1184{
1185 int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1186
1187 if (vaddr >= 0)
1188 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1189 return vaddr < 0 ? vaddr : 0;
1190}
1191
1192static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1193{
1194 int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1195
1196 if (vaddr >= 0)
1197 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1198 return vaddr < 0 ? vaddr : 0;
1199}
1200
1201#define EEPROM_MAGIC 0x38E2F10C
1202
1203static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1204 u8 *data)
1205{
1206 int i, err = 0;
1207 struct adapter *adapter = netdev2adap(dev);
1208 u8 *buf = kvzalloc(EEPROMSIZE, GFP_KERNEL);
1209
1210 if (!buf)
1211 return -ENOMEM;
1212
1213 e->magic = EEPROM_MAGIC;
1214 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1215 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1216
1217 if (!err)
1218 memcpy(data, buf + e->offset, e->len);
1219 kvfree(buf);
1220 return err;
1221}
1222
1223static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1224 u8 *data)
1225{
1226 u8 *buf;
1227 int err = 0;
1228 u32 aligned_offset, aligned_len, *p;
1229 struct adapter *adapter = netdev2adap(dev);
1230
1231 if (eeprom->magic != EEPROM_MAGIC)
1232 return -EINVAL;
1233
1234 aligned_offset = eeprom->offset & ~3;
1235 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1236
1237 if (adapter->pf > 0) {
1238 u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
1239
1240 if (aligned_offset < start ||
1241 aligned_offset + aligned_len > start + EEPROMPFSIZE)
1242 return -EPERM;
1243 }
1244
1245 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/* A read-modify-write is needed because the first and/or last
		 * words are only partially covered by the update.
		 */
1248 buf = kvzalloc(aligned_len, GFP_KERNEL);
1249 if (!buf)
1250 return -ENOMEM;
1251 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1252 if (!err && aligned_len > 4)
1253 err = eeprom_rd_phys(adapter,
1254 aligned_offset + aligned_len - 4,
1255 (u32 *)&buf[aligned_len - 4]);
1256 if (err)
1257 goto out;
1258 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1259 } else {
1260 buf = data;
1261 }
1262
1263 err = t4_seeprom_wp(adapter, false);
1264 if (err)
1265 goto out;
1266
1267 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1268 err = eeprom_wr_phys(adapter, aligned_offset, *p);
1269 aligned_offset += 4;
1270 }
1271
1272 if (!err)
1273 err = t4_seeprom_wp(adapter, true);
1274out:
1275 if (buf != data)
1276 kvfree(buf);
1277 return err;
1278}
1279
1280static int cxgb4_ethtool_flash_bootcfg(struct net_device *netdev,
1281 const u8 *data, u32 size)
1282{
1283 struct adapter *adap = netdev2adap(netdev);
1284 int ret;
1285
1286 ret = t4_load_bootcfg(adap, data, size);
1287 if (ret)
1288 dev_err(adap->pdev_dev, "Failed to load boot cfg image\n");
1289
1290 return ret;
1291}
1292
1293static int cxgb4_ethtool_flash_boot(struct net_device *netdev,
1294 const u8 *bdata, u32 size)
1295{
1296 struct adapter *adap = netdev2adap(netdev);
1297 unsigned int offset;
1298 u8 *data;
1299 int ret;
1300
1301 data = kmemdup(bdata, size, GFP_KERNEL);
1302 if (!data)
1303 return -ENOMEM;
1304
1305 offset = OFFSET_G(t4_read_reg(adap, PF_REG(0, PCIE_PF_EXPROM_OFST_A)));
1306
1307 ret = t4_load_boot(adap, data, offset, size);
1308 if (ret)
1309 dev_err(adap->pdev_dev, "Failed to load boot image\n");
1310
1311 kfree(data);
1312 return ret;
1313}
1314
1315#define CXGB4_PHY_SIG 0x130000ea
1316
1317static int cxgb4_validate_phy_image(const u8 *data, u32 *size)
1318{
1319 struct cxgb4_fw_data *header;
1320
1321 header = (struct cxgb4_fw_data *)data;
1322 if (be32_to_cpu(header->signature) != CXGB4_PHY_SIG)
1323 return -EINVAL;
1324
1325 return 0;
1326}
1327
1328static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
1329 const u8 *data, u32 size)
1330{
1331 struct adapter *adap = netdev2adap(netdev);
1332 int ret;
1333
1334 ret = cxgb4_validate_phy_image(data, NULL);
1335 if (ret) {
1336 dev_err(adap->pdev_dev, "PHY signature mismatch\n");
1337 return ret;
1338 }
1339
	/* Reset the chip so it is in an uninitialized state before loading
	 * the new PHY firmware; a running firmware would otherwise only keep
	 * the image in local RAM, where it is lost on the next reset.
	 */
1345 ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
1346 if (ret < 0) {
1347 dev_err(adap->pdev_dev,
1348 "Set FW to RESET for flashing PHY FW failed. ret: %d\n",
1349 ret);
1350 return ret;
1351 }
1352
1353 ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
1354 if (ret < 0) {
1355 dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
1356 ret);
1357 return ret;
1358 }
1359
1360 return 0;
1361}
1362
1363static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
1364 const u8 *data, u32 size)
1365{
1366 struct adapter *adap = netdev2adap(netdev);
1367 unsigned int mbox = PCIE_FW_MASTER_M + 1;
1368 int ret;
1369
	/* If the adapter is fully initialized, use our own mailbox so the
	 * running firmware can cooperate in the upgrade; otherwise use a
	 * mailbox value beyond the normal range and drive the upgrade
	 * entirely from the host.
	 */
1375 if (adap->flags & CXGB4_FULL_INIT_DONE)
1376 mbox = adap->mbox;
1377
1378 ret = t4_fw_upgrade(adap, mbox, data, size, 1);
1379 if (ret)
1380 dev_err(adap->pdev_dev,
1381 "Failed to flash firmware\n");
1382
1383 return ret;
1384}
1385
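/* Dispatch a flash request to the handler for the selected region and log
 * the result; a successful write takes effect after the driver is reloaded.
 */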
1386static int cxgb4_ethtool_flash_region(struct net_device *netdev,
1387 const u8 *data, u32 size, u32 region)
1388{
1389 struct adapter *adap = netdev2adap(netdev);
1390 int ret;
1391
1392 switch (region) {
1393 case CXGB4_ETHTOOL_FLASH_FW:
1394 ret = cxgb4_ethtool_flash_fw(netdev, data, size);
1395 break;
1396 case CXGB4_ETHTOOL_FLASH_PHY:
1397 ret = cxgb4_ethtool_flash_phy(netdev, data, size);
1398 break;
1399 case CXGB4_ETHTOOL_FLASH_BOOT:
1400 ret = cxgb4_ethtool_flash_boot(netdev, data, size);
1401 break;
1402 case CXGB4_ETHTOOL_FLASH_BOOTCFG:
1403 ret = cxgb4_ethtool_flash_bootcfg(netdev, data, size);
1404 break;
1405 default:
1406 ret = -EOPNOTSUPP;
1407 break;
1408 }
1409
1410 if (!ret)
1411 dev_info(adap->pdev_dev,
1412 "loading %s successful, reload cxgb4 driver\n",
1413 flash_region_strings[region]);
1414 return ret;
1415}
1416
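/* Image validation helpers.  Each checks the signature of a candidate image
 * and, where the format allows it, returns the image size so that composite
 * files containing several images can be walked region by region.
 */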
1417#define CXGB4_FW_SIG 0x4368656c
1418#define CXGB4_FW_SIG_OFFSET 0x160
1419
1420static int cxgb4_validate_fw_image(const u8 *data, u32 *size)
1421{
1422 struct cxgb4_fw_data *header;
1423
1424 header = (struct cxgb4_fw_data *)&data[CXGB4_FW_SIG_OFFSET];
1425 if (be32_to_cpu(header->signature) != CXGB4_FW_SIG)
1426 return -EINVAL;
1427
1428 if (size)
1429 *size = be16_to_cpu(((struct fw_hdr *)data)->len512) * 512;
1430
1431 return 0;
1432}
1433
1434static int cxgb4_validate_bootcfg_image(const u8 *data, u32 *size)
1435{
1436 struct cxgb4_bootcfg_data *header;
1437
1438 header = (struct cxgb4_bootcfg_data *)data;
1439 if (le16_to_cpu(header->signature) != BOOT_CFG_SIG)
1440 return -EINVAL;
1441
1442 return 0;
1443}
1444
1445static int cxgb4_validate_boot_image(const u8 *data, u32 *size)
1446{
1447 struct cxgb4_pci_exp_rom_header *exp_header;
1448 struct cxgb4_pcir_data *pcir_header;
1449 struct legacy_pci_rom_hdr *header;
1450 const u8 *cur_header = data;
1451 u16 pcir_offset;
1452
1453 exp_header = (struct cxgb4_pci_exp_rom_header *)data;
1454
1455 if (le16_to_cpu(exp_header->signature) != BOOT_SIGNATURE)
1456 return -EINVAL;
1457
1458 if (size) {
1459 do {
1460 header = (struct legacy_pci_rom_hdr *)cur_header;
1461 pcir_offset = le16_to_cpu(header->pcir_offset);
1462 pcir_header = (struct cxgb4_pcir_data *)(cur_header +
1463 pcir_offset);
1464
1465 *size += header->size512 * 512;
1466 cur_header += header->size512 * 512;
1467 } while (!(pcir_header->indicator & CXGB4_HDR_INDI));
1468 }
1469
1470 return 0;
1471}
1472
1473static int cxgb4_ethtool_get_flash_region(const u8 *data, u32 *size)
1474{
1475 if (!cxgb4_validate_fw_image(data, size))
1476 return CXGB4_ETHTOOL_FLASH_FW;
1477 if (!cxgb4_validate_boot_image(data, size))
1478 return CXGB4_ETHTOOL_FLASH_BOOT;
1479 if (!cxgb4_validate_phy_image(data, size))
1480 return CXGB4_ETHTOOL_FLASH_PHY;
1481 if (!cxgb4_validate_bootcfg_image(data, size))
1482 return CXGB4_ETHTOOL_FLASH_BOOTCFG;
1483
1484 return -EOPNOTSUPP;
1485}
1486
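/* ethtool -f handler (e.g. "ethtool -f <dev> <image> <region>").  Only the
 * master PF may flash firmware.  When all regions are selected the file may
 * be a concatenation of images; each one is identified by its signature and
 * written to its own region.
 */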
1487static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1488{
1489 struct adapter *adap = netdev2adap(netdev);
1490 const struct firmware *fw;
1491 unsigned int master;
1492 u8 master_vld = 0;
1493 const u8 *fw_data;
1494 size_t fw_size;
1495 u32 size = 0;
1496 u32 pcie_fw;
1497 int region;
1498 int ret;
1499
1500 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
1501 master = PCIE_FW_MASTER_G(pcie_fw);
1502 if (pcie_fw & PCIE_FW_MASTER_VLD_F)
1503 master_vld = 1;
1504
1505 if (master_vld && (master != adap->pf)) {
1506 dev_warn(adap->pdev_dev,
1507 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
1508 return -EOPNOTSUPP;
1509 }
1510
1511 ef->data[sizeof(ef->data) - 1] = '\0';
1512 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1513 if (ret < 0)
1514 return ret;
1515
1516 fw_data = fw->data;
1517 fw_size = fw->size;
1518 if (ef->region == ETHTOOL_FLASH_ALL_REGIONS) {
1519 while (fw_size > 0) {
1520 size = 0;
1521 region = cxgb4_ethtool_get_flash_region(fw_data, &size);
1522 if (region < 0 || !size) {
1523 ret = region;
1524 goto out_free_fw;
1525 }
1526
1527 ret = cxgb4_ethtool_flash_region(netdev, fw_data, size,
1528 region);
1529 if (ret)
1530 goto out_free_fw;
1531
1532 fw_data += size;
1533 fw_size -= size;
1534 }
1535 } else {
1536 ret = cxgb4_ethtool_flash_region(netdev, fw_data, fw_size,
1537 ef->region);
1538 }
1539
1540out_free_fw:
1541 release_firmware(fw);
1542 return ret;
1543}
1544
1545static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
1546{
1547 struct port_info *pi = netdev_priv(dev);
1548 struct adapter *adapter = pi->adapter;
1549
1550 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1551 SOF_TIMESTAMPING_RX_SOFTWARE |
1552 SOF_TIMESTAMPING_SOFTWARE;
1553
1554 ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
1555 SOF_TIMESTAMPING_TX_HARDWARE |
1556 SOF_TIMESTAMPING_RAW_HARDWARE;
1557
1558 ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1559 (1 << HWTSTAMP_TX_ON);
1560
1561 ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1562 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1563 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1564 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
1565 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
1566 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
1567
1568 if (adapter->ptp_clock)
1569 ts_info->phc_index = ptp_clock_index(adapter->ptp_clock);
1570 else
1571 ts_info->phc_index = -1;
1572
1573 return 0;
1574}
1575
1576static u32 get_rss_table_size(struct net_device *dev)
1577{
1578 const struct port_info *pi = netdev_priv(dev);
1579
1580 return pi->rss_size;
1581}
1582
1583static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
1584{
1585 const struct port_info *pi = netdev_priv(dev);
1586 unsigned int n = pi->rss_size;
1587
1588 if (hfunc)
1589 *hfunc = ETH_RSS_HASH_TOP;
1590 if (!p)
1591 return 0;
1592 while (n--)
1593 p[n] = pi->rss[n];
1594 return 0;
1595}
1596
1597static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
1598 const u8 hfunc)
1599{
1600 unsigned int i;
1601 struct port_info *pi = netdev_priv(dev);
1602
	/* Only the default Toeplitz hash is supported and the hash key cannot
	 * be changed, so reject any request that asks for either.
	 */
1606 if (key ||
1607 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
1608 return -EOPNOTSUPP;
1609 if (!p)
1610 return 0;
1611
	/* The interface must have been brought up at least once. */
1613 if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
1614 for (i = 0; i < pi->rss_size; i++)
1615 pi->rss[i] = p[i];
1616
1617 return cxgb4_write_rss(pi, pi->rss);
1618 }
1619
1620 return -EPERM;
1621}
1622
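/* Map a TID used by the ethtool n-tuple code back to its filter entry,
 * checking the high-priority and normal filter regions before falling back
 * to a hash-filter TID lookup.
 */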
1623static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
1624 u32 ftid)
1625{
1626 struct tid_info *t = &adap->tids;
1627
1628 if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
1629 return &t->hpftid_tab[ftid - t->hpftid_base];
1630
1631 if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
1632 return &t->ftid_tab[ftid - t->ftid_base];
1633
1634 return lookup_tid(t, ftid);
1635}
1636
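/* Convert the driver's ch_filter_specification into the ethtool
 * rx_flow_spec layout used by ETHTOOL_GRXCLSRULE.
 */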
1637static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
1638 struct ch_filter_specification *dfs)
1639{
1640 switch (dfs->val.proto) {
1641 case IPPROTO_TCP:
1642 if (dfs->type)
1643 fs->flow_type = TCP_V6_FLOW;
1644 else
1645 fs->flow_type = TCP_V4_FLOW;
1646 break;
1647 case IPPROTO_UDP:
1648 if (dfs->type)
1649 fs->flow_type = UDP_V6_FLOW;
1650 else
1651 fs->flow_type = UDP_V4_FLOW;
1652 break;
1653 }
1654
1655 if (dfs->type) {
1656 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->val.fport);
1657 fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->mask.fport);
1658 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->val.lport);
1659 fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->mask.lport);
1660 memcpy(&fs->h_u.tcp_ip6_spec.ip6src, &dfs->val.fip[0],
1661 sizeof(fs->h_u.tcp_ip6_spec.ip6src));
1662 memcpy(&fs->m_u.tcp_ip6_spec.ip6src, &dfs->mask.fip[0],
1663 sizeof(fs->m_u.tcp_ip6_spec.ip6src));
1664 memcpy(&fs->h_u.tcp_ip6_spec.ip6dst, &dfs->val.lip[0],
1665 sizeof(fs->h_u.tcp_ip6_spec.ip6dst));
1666 memcpy(&fs->m_u.tcp_ip6_spec.ip6dst, &dfs->mask.lip[0],
1667 sizeof(fs->m_u.tcp_ip6_spec.ip6dst));
1668 fs->h_u.tcp_ip6_spec.tclass = dfs->val.tos;
1669 fs->m_u.tcp_ip6_spec.tclass = dfs->mask.tos;
1670 } else {
1671 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->val.fport);
1672 fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->mask.fport);
1673 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->val.lport);
1674 fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->mask.lport);
1675 memcpy(&fs->h_u.tcp_ip4_spec.ip4src, &dfs->val.fip[0],
1676 sizeof(fs->h_u.tcp_ip4_spec.ip4src));
1677 memcpy(&fs->m_u.tcp_ip4_spec.ip4src, &dfs->mask.fip[0],
1678 sizeof(fs->m_u.tcp_ip4_spec.ip4src));
1679 memcpy(&fs->h_u.tcp_ip4_spec.ip4dst, &dfs->val.lip[0],
1680 sizeof(fs->h_u.tcp_ip4_spec.ip4dst));
1681 memcpy(&fs->m_u.tcp_ip4_spec.ip4dst, &dfs->mask.lip[0],
1682 sizeof(fs->m_u.tcp_ip4_spec.ip4dst));
1683 fs->h_u.tcp_ip4_spec.tos = dfs->val.tos;
1684 fs->m_u.tcp_ip4_spec.tos = dfs->mask.tos;
1685 }
1686 fs->h_ext.vlan_tci = cpu_to_be16(dfs->val.ivlan);
1687 fs->m_ext.vlan_tci = cpu_to_be16(dfs->mask.ivlan);
1688 fs->flow_type |= FLOW_EXT;
1689
1690 if (dfs->action == FILTER_DROP)
1691 fs->ring_cookie = RX_CLS_FLOW_DISC;
1692 else
1693 fs->ring_cookie = dfs->iq;
1694}
1695
1696static int cxgb4_ntuple_get_filter(struct net_device *dev,
1697 struct ethtool_rxnfc *cmd,
1698 unsigned int loc)
1699{
1700 const struct port_info *pi = netdev_priv(dev);
1701 struct adapter *adap = netdev2adap(dev);
1702 struct filter_entry *f;
1703 int ftid;
1704
1705 if (!(adap->flags & CXGB4_FULL_INIT_DONE))
1706 return -EAGAIN;
1707
1708
1709 if (!adap->ethtool_filters)
1710 return -EOPNOTSUPP;
1711
1712 if (loc >= adap->ethtool_filters->nentries)
1713 return -ERANGE;
1714
1715 if (!test_bit(loc, adap->ethtool_filters->port[pi->port_id].bmap))
1716 return -ENOENT;
1717
1718 ftid = adap->ethtool_filters->port[pi->port_id].loc_array[loc];
1719
1720
1721 f = cxgb4_get_filter_entry(adap, ftid);
1722
1723 cxgb4_fill_filter_rule(&cmd->fs, &f->fs);
1724
1725 return 0;
1726}
1727
1728static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1729 u32 *rules)
1730{
1731 const struct port_info *pi = netdev_priv(dev);
1732 struct adapter *adap = netdev2adap(dev);
1733 unsigned int count = 0, index = 0;
1734 int ret = 0;
1735
1736 switch (info->cmd) {
1737 case ETHTOOL_GRXFH: {
1738 unsigned int v = pi->rss_mode;
1739
1740 info->data = 0;
1741 switch (info->flow_type) {
1742 case TCP_V4_FLOW:
1743 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
1744 info->data = RXH_IP_SRC | RXH_IP_DST |
1745 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1746 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1747 info->data = RXH_IP_SRC | RXH_IP_DST;
1748 break;
1749 case UDP_V4_FLOW:
1750 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
1751 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1752 info->data = RXH_IP_SRC | RXH_IP_DST |
1753 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1754 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1755 info->data = RXH_IP_SRC | RXH_IP_DST;
1756 break;
1757 case SCTP_V4_FLOW:
1758 case AH_ESP_V4_FLOW:
1759 case IPV4_FLOW:
1760 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1761 info->data = RXH_IP_SRC | RXH_IP_DST;
1762 break;
1763 case TCP_V6_FLOW:
1764 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
1765 info->data = RXH_IP_SRC | RXH_IP_DST |
1766 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1767 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1768 info->data = RXH_IP_SRC | RXH_IP_DST;
1769 break;
1770 case UDP_V6_FLOW:
1771 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
1772 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1773 info->data = RXH_IP_SRC | RXH_IP_DST |
1774 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1775 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1776 info->data = RXH_IP_SRC | RXH_IP_DST;
1777 break;
1778 case SCTP_V6_FLOW:
1779 case AH_ESP_V6_FLOW:
1780 case IPV6_FLOW:
1781 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1782 info->data = RXH_IP_SRC | RXH_IP_DST;
1783 break;
1784 }
1785 return 0;
1786 }
1787 case ETHTOOL_GRXRINGS:
1788 info->data = pi->nqsets;
1789 return 0;
1790 case ETHTOOL_GRXCLSRLCNT:
1791 info->rule_cnt =
1792 adap->ethtool_filters->port[pi->port_id].in_use;
1793 return 0;
1794 case ETHTOOL_GRXCLSRULE:
1795 return cxgb4_ntuple_get_filter(dev, info, info->fs.location);
1796 case ETHTOOL_GRXCLSRLALL:
1797 info->data = adap->ethtool_filters->nentries;
1798 while (count < info->rule_cnt) {
1799 ret = cxgb4_ntuple_get_filter(dev, info, index);
1800 if (!ret)
1801 rules[count++] = index;
1802 index++;
1803 }
1804 return 0;
1805 }
1806
1807 return -EOPNOTSUPP;
1808}
1809
1810static int cxgb4_ntuple_del_filter(struct net_device *dev,
1811 struct ethtool_rxnfc *cmd)
1812{
1813 struct cxgb4_ethtool_filter_info *filter_info;
1814 struct adapter *adapter = netdev2adap(dev);
1815 struct port_info *pi = netdev_priv(dev);
1816 struct filter_entry *f;
1817 u32 filter_id;
1818 int ret;
1819
1820 if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
1821 return -EAGAIN;
1822
1823 if (!adapter->ethtool_filters)
1824 return -EOPNOTSUPP;
1825
1826 if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
1827 dev_err(adapter->pdev_dev,
1828 "Location must be < %u",
1829 adapter->ethtool_filters->nentries);
1830 return -ERANGE;
1831 }
1832
1833 filter_info = &adapter->ethtool_filters->port[pi->port_id];
1834
1835 if (!test_bit(cmd->fs.location, filter_info->bmap))
1836 return -ENOENT;
1837
1838 filter_id = filter_info->loc_array[cmd->fs.location];
1839 f = cxgb4_get_filter_entry(adapter, filter_id);
1840
1841 if (f->fs.prio)
1842 filter_id -= adapter->tids.hpftid_base;
1843 else if (!f->fs.hash)
1844 filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);
1845
1846 ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
1847 if (ret)
1848 goto err;
1849
1850 clear_bit(cmd->fs.location, filter_info->bmap);
1851 filter_info->in_use--;
1852
1853err:
1854 return ret;
1855}
1856
1857
1858static int cxgb4_ntuple_set_filter(struct net_device *netdev,
1859 struct ethtool_rxnfc *cmd)
1860{
1861 struct ethtool_rx_flow_spec_input input = {};
1862 struct cxgb4_ethtool_filter_info *filter_info;
1863 struct adapter *adapter = netdev2adap(netdev);
1864 struct port_info *pi = netdev_priv(netdev);
1865 struct ch_filter_specification fs;
1866 struct ethtool_rx_flow_rule *flow;
1867 u32 tid;
1868 int ret;
1869
1870 if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
1871 return -EAGAIN;
1872
1873 if (!adapter->ethtool_filters)
1874 return -EOPNOTSUPP;
1875
1876 if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
1877 dev_err(adapter->pdev_dev,
1878 "Location must be < %u",
1879 adapter->ethtool_filters->nentries);
1880 return -ERANGE;
1881 }
1882
1883 if (test_bit(cmd->fs.location,
1884 adapter->ethtool_filters->port[pi->port_id].bmap))
1885 return -EEXIST;
1886
1887 memset(&fs, 0, sizeof(fs));
1888
1889 input.fs = &cmd->fs;
1890 flow = ethtool_rx_flow_rule_create(&input);
1891 if (IS_ERR(flow)) {
1892 ret = PTR_ERR(flow);
1893 goto exit;
1894 }
1895
1896 fs.hitcnts = 1;
1897
1898 ret = cxgb4_flow_rule_replace(netdev, flow->rule, cmd->fs.location,
1899 NULL, &fs, &tid);
1900 if (ret)
1901 goto free;
1902
1903 filter_info = &adapter->ethtool_filters->port[pi->port_id];
1904
1905 if (fs.prio)
1906 tid += adapter->tids.hpftid_base;
1907 else if (!fs.hash)
1908 tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);
1909
1910 filter_info->loc_array[cmd->fs.location] = tid;
1911 set_bit(cmd->fs.location, filter_info->bmap);
1912 filter_info->in_use++;
1913
1914free:
1915 ethtool_rx_flow_rule_destroy(flow);
1916exit:
1917 return ret;
1918}
1919
1920static int set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1921{
1922 int ret = -EOPNOTSUPP;
1923
1924 switch (cmd->cmd) {
1925 case ETHTOOL_SRXCLSRLINS:
1926 ret = cxgb4_ntuple_set_filter(dev, cmd);
1927 break;
1928 case ETHTOOL_SRXCLSRLDEL:
1929 ret = cxgb4_ntuple_del_filter(dev, cmd);
1930 break;
1931 default:
1932 break;
1933 }
1934
1935 return ret;
1936}
1937
1938static int set_dump(struct net_device *dev, struct ethtool_dump *eth_dump)
1939{
1940 struct adapter *adapter = netdev2adap(dev);
1941 u32 len = 0;
1942
1943 len = sizeof(struct cudbg_hdr) +
1944 sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
1945 len += cxgb4_get_dump_length(adapter, eth_dump->flag);
1946
1947 adapter->eth_dump.flag = eth_dump->flag;
1948 adapter->eth_dump.len = len;
1949 return 0;
1950}
1951
1952static int get_dump_flag(struct net_device *dev, struct ethtool_dump *eth_dump)
1953{
1954 struct adapter *adapter = netdev2adap(dev);
1955
1956 eth_dump->flag = adapter->eth_dump.flag;
1957 eth_dump->len = adapter->eth_dump.len;
1958 eth_dump->version = adapter->eth_dump.version;
1959 return 0;
1960}
1961
1962static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
1963 void *buf)
1964{
1965 struct adapter *adapter = netdev2adap(dev);
1966 u32 len = 0;
1967 int ret = 0;
1968
1969 if (adapter->eth_dump.flag == CXGB4_ETH_DUMP_NONE)
1970 return -ENOENT;
1971
1972 len = sizeof(struct cudbg_hdr) +
1973 sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
1974 len += cxgb4_get_dump_length(adapter, adapter->eth_dump.flag);
1975 if (eth_dump->len < len)
1976 return -ENOMEM;
1977
1978 ret = cxgb4_cudbg_collect(adapter, buf, &len, adapter->eth_dump.flag);
1979 if (ret)
1980 return ret;
1981
1982 eth_dump->flag = adapter->eth_dump.flag;
1983 eth_dump->len = len;
1984 eth_dump->version = adapter->eth_dump.version;
1985 return 0;
1986}
1987
1988static int cxgb4_get_module_info(struct net_device *dev,
1989 struct ethtool_modinfo *modinfo)
1990{
1991 struct port_info *pi = netdev_priv(dev);
1992 u8 sff8472_comp, sff_diag_type, sff_rev;
1993 struct adapter *adapter = pi->adapter;
1994 int ret;
1995
1996 if (!t4_is_inserted_mod_type(pi->mod_type))
1997 return -EINVAL;
1998
1999 switch (pi->port_type) {
2000 case FW_PORT_TYPE_SFP:
2001 case FW_PORT_TYPE_QSA:
2002 case FW_PORT_TYPE_SFP28:
2003 ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2004 I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR,
2005 SFF_8472_COMP_LEN, &sff8472_comp);
2006 if (ret)
2007 return ret;
2008 ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2009 I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR,
2010 SFP_DIAG_TYPE_LEN, &sff_diag_type);
2011 if (ret)
2012 return ret;
2013
2014 if (!sff8472_comp || (sff_diag_type & 4)) {
2015 modinfo->type = ETH_MODULE_SFF_8079;
2016 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
2017 } else {
2018 modinfo->type = ETH_MODULE_SFF_8472;
2019 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2020 }
2021 break;
2022
2023 case FW_PORT_TYPE_QSFP:
2024 case FW_PORT_TYPE_QSFP_10G:
2025 case FW_PORT_TYPE_CR_QSFP:
2026 case FW_PORT_TYPE_CR2_QSFP:
2027 case FW_PORT_TYPE_CR4_QSFP:
2028 ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2029 I2C_DEV_ADDR_A0, SFF_REV_ADDR,
2030 SFF_REV_LEN, &sff_rev);
		/* For QSFP ports, a revision value >= 3 means the module is
		 * SFF-8636 compliant; older revisions follow SFF-8436.
		 */
2034 if (ret)
2035 return ret;
2036 if (sff_rev >= 0x3) {
2037 modinfo->type = ETH_MODULE_SFF_8636;
2038 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2039 } else {
2040 modinfo->type = ETH_MODULE_SFF_8436;
2041 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2042 }
2043 break;
2044
2045 default:
2046 return -EINVAL;
2047 }
2048
2049 return 0;
2050}
2051
2052static int cxgb4_get_module_eeprom(struct net_device *dev,
2053 struct ethtool_eeprom *eprom, u8 *data)
2054{
2055 int ret = 0, offset = eprom->offset, len = eprom->len;
2056 struct port_info *pi = netdev_priv(dev);
2057 struct adapter *adapter = pi->adapter;
2058
2059 memset(data, 0, eprom->len);
2060 if (offset + len <= I2C_PAGE_SIZE)
2061 return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2062 I2C_DEV_ADDR_A0, offset, len, data);
2063
	/* The requested range spans the A0 and A2 device pages. */
2065 if (offset <= I2C_PAGE_SIZE) {
2066
2067 len = I2C_PAGE_SIZE - offset;
2068 ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2069 I2C_DEV_ADDR_A0, offset, len, data);
2070 if (ret)
2071 return ret;
2072 offset = I2C_PAGE_SIZE;
2073
		/* The remaining bytes are read from the A2 page below. */
2076 len = eprom->len - len;
2077 }
2078
2079 return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2,
2080 offset, len, &data[eprom->len - len]);
2081}
2082
2083static u32 cxgb4_get_priv_flags(struct net_device *netdev)
2084{
2085 struct port_info *pi = netdev_priv(netdev);
2086 struct adapter *adapter = pi->adapter;
2087
2088 return (adapter->eth_flags | pi->eth_flags);
2089}
2090
/* Update only the bits selected by @flags in *cur_flags, taking their new
 * values from @new_flags.
 */
2097static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags)
2098{
2099 *cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
2100}
2101
2102static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
2103{
2104 struct port_info *pi = netdev_priv(netdev);
2105 struct adapter *adapter = pi->adapter;
2106
2107 set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP);
2108 set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT);
2109
2110 return 0;
2111}
2112
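/* Run the loopback packet self-test with the Tx queues stopped and the
 * carrier down, then restore the previous state.
 */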
2113static void cxgb4_lb_test(struct net_device *netdev, u64 *lb_status)
2114{
2115 int dev_state = netif_running(netdev);
2116
2117 if (dev_state) {
2118 netif_tx_stop_all_queues(netdev);
2119 netif_carrier_off(netdev);
2120 }
2121
2122 *lb_status = cxgb4_selftest_lb_pkt(netdev);
2123
2124 if (dev_state) {
2125 netif_tx_start_all_queues(netdev);
2126 netif_carrier_on(netdev);
2127 }
2128}
2129
2130static void cxgb4_self_test(struct net_device *netdev,
2131 struct ethtool_test *eth_test, u64 *data)
2132{
2133 struct port_info *pi = netdev_priv(netdev);
2134 struct adapter *adap = pi->adapter;
2135
2136 memset(data, 0, sizeof(u64) * CXGB4_ETHTOOL_MAX_TEST);
2137
2138 if (!(adap->flags & CXGB4_FULL_INIT_DONE) ||
2139 !(adap->flags & CXGB4_FW_OK)) {
2140 eth_test->flags |= ETH_TEST_FL_FAILED;
2141 return;
2142 }
2143
2144 if (eth_test->flags & ETH_TEST_FL_OFFLINE)
2145 cxgb4_lb_test(netdev, &data[CXGB4_ETHTOOL_LB_TEST]);
2146
2147 if (data[CXGB4_ETHTOOL_LB_TEST])
2148 eth_test->flags |= ETH_TEST_FL_FAILED;
2149}
2150
2151static const struct ethtool_ops cxgb_ethtool_ops = {
2152 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
2153 ETHTOOL_COALESCE_RX_MAX_FRAMES |
2154 ETHTOOL_COALESCE_TX_USECS_IRQ |
2155 ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
2156 .get_link_ksettings = get_link_ksettings,
2157 .set_link_ksettings = set_link_ksettings,
2158 .get_fecparam = get_fecparam,
2159 .set_fecparam = set_fecparam,
2160 .get_drvinfo = get_drvinfo,
2161 .get_msglevel = get_msglevel,
2162 .set_msglevel = set_msglevel,
2163 .get_ringparam = get_sge_param,
2164 .set_ringparam = set_sge_param,
2165 .get_coalesce = get_coalesce,
2166 .set_coalesce = set_coalesce,
2167 .get_eeprom_len = get_eeprom_len,
2168 .get_eeprom = get_eeprom,
2169 .set_eeprom = set_eeprom,
2170 .get_pauseparam = get_pauseparam,
2171 .set_pauseparam = set_pauseparam,
2172 .get_link = ethtool_op_get_link,
2173 .get_strings = get_strings,
2174 .set_phys_id = identify_port,
2175 .nway_reset = restart_autoneg,
2176 .get_sset_count = get_sset_count,
2177 .get_ethtool_stats = get_stats,
2178 .get_regs_len = get_regs_len,
2179 .get_regs = get_regs,
2180 .get_rxnfc = get_rxnfc,
2181 .set_rxnfc = set_rxnfc,
2182 .get_rxfh_indir_size = get_rss_table_size,
2183 .get_rxfh = get_rss_table,
2184 .set_rxfh = set_rss_table,
2185 .self_test = cxgb4_self_test,
2186 .flash_device = set_flash,
2187 .get_ts_info = get_ts_info,
2188 .set_dump = set_dump,
2189 .get_dump_flag = get_dump_flag,
2190 .get_dump_data = get_dump_data,
2191 .get_module_info = cxgb4_get_module_info,
2192 .get_module_eeprom = cxgb4_get_module_eeprom,
2193 .get_priv_flags = cxgb4_get_priv_flags,
2194 .set_priv_flags = cxgb4_set_priv_flags,
2195};
2196
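/* Free the per-port location arrays and bitmaps used to track ethtool
 * n-tuple filters.
 */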
2197void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
2198{
2199 struct cxgb4_ethtool_filter_info *eth_filter_info;
2200 u8 i;
2201
2202 if (!adap->ethtool_filters)
2203 return;
2204
2205 eth_filter_info = adap->ethtool_filters->port;
2206
2207 if (eth_filter_info) {
2208 for (i = 0; i < adap->params.nports; i++) {
2209 kvfree(eth_filter_info[i].loc_array);
2210 kfree(eth_filter_info[i].bmap);
2211 }
2212 kfree(eth_filter_info);
2213 }
2214
2215 kfree(adap->ethtool_filters);
2216}
2217
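/* Allocate the ethtool n-tuple filter bookkeeping: one location array and
 * bitmap per port, sized for the high-priority, normal and (when hash
 * filters are enabled) hash filter regions.
 */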
2218int cxgb4_init_ethtool_filters(struct adapter *adap)
2219{
2220 struct cxgb4_ethtool_filter_info *eth_filter_info;
2221 struct cxgb4_ethtool_filter *eth_filter;
2222 struct tid_info *tids = &adap->tids;
2223 u32 nentries, i;
2224 int ret;
2225
2226 eth_filter = kzalloc(sizeof(*eth_filter), GFP_KERNEL);
2227 if (!eth_filter)
2228 return -ENOMEM;
2229
2230 eth_filter_info = kcalloc(adap->params.nports,
2231 sizeof(*eth_filter_info),
2232 GFP_KERNEL);
2233 if (!eth_filter_info) {
2234 ret = -ENOMEM;
2235 goto free_eth_filter;
2236 }
2237
2238 eth_filter->port = eth_filter_info;
2239
2240 nentries = tids->nhpftids + tids->nftids;
2241 if (is_hashfilter(adap))
2242 nentries += tids->nhash +
2243 (adap->tids.stid_base - adap->tids.tid_base);
2244 eth_filter->nentries = nentries;
2245
2246 for (i = 0; i < adap->params.nports; i++) {
2247 eth_filter->port[i].loc_array = kvzalloc(nentries, GFP_KERNEL);
2248 if (!eth_filter->port[i].loc_array) {
2249 ret = -ENOMEM;
2250 goto free_eth_finfo;
2251 }
2252
2253 eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries),
2254 sizeof(unsigned long),
2255 GFP_KERNEL);
2256 if (!eth_filter->port[i].bmap) {
2257 ret = -ENOMEM;
2258 goto free_eth_finfo;
2259 }
2260 }
2261
2262 adap->ethtool_filters = eth_filter;
2263 return 0;
2264
2265free_eth_finfo:
2266 while (i-- > 0) {
2267 kfree(eth_filter->port[i].bmap);
2268 kvfree(eth_filter->port[i].loc_array);
2269 }
2270 kfree(eth_filter_info);
2271
2272free_eth_filter:
2273 kfree(eth_filter);
2274
2275 return ret;
2276}
2277
2278void cxgb4_set_ethtool_ops(struct net_device *netdev)
2279{
2280 netdev->ethtool_ops = &cxgb_ethtool_ops;
2281}
2282