/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_fw_hdr.h"
#include "bnxt_coredump.h"
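
/* NVRAM flash and package-install HWRM requests can run much longer than a
 * normal firmware command, so the timeouts below are scaled up from
 * HWRM_CMD_TIMEOUT (the multipliers are driver-chosen, not mandated by the
 * HWRM spec).
 */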
#define FLASH_NVRAM_TIMEOUT	((HWRM_CMD_TIMEOUT) * 100)
#define FLASH_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)
#define INSTALL_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)

static u32 bnxt_get_msglevel(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnxt_set_msglevel(struct net_device *dev, u32 value)
{
	struct bnxt *bp = netdev_priv(dev);

	bp->msg_enable = value;
}

static int bnxt_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_coal *hw_coal;
	u16 mult;

	memset(coal, 0, sizeof(*coal));

	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;

	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;

	return 0;
}

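/* The hardware coalescing parameters count completion buffer records, while
 * ethtool works in frames; bufs_per_record converts between the two (one
 * frame may consume multiple buffer records).  Illustrative usage from user
 * space (not part of this file):
 *
 *	ethtool -C eth0 adaptive-rx on
 *	ethtool -C eth0 rx-usecs 50 rx-frames 64
 */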
static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			bp->flags &= ~(BNXT_FLAG_DIM);
			goto reset_coalesce;
		}
	}

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		update_stats = true;
	}

reset_coalesce:
	if (netif_running(dev)) {
		if (update_stats) {
			rc = bnxt_close_nic(bp, true, false);
			if (!rc)
				rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}

static const char * const bnxt_ring_rx_stats_str[] = {
	"rx_ucast_packets",
	"rx_mcast_packets",
	"rx_bcast_packets",
	"rx_discards",
	"rx_errors",
	"rx_ucast_bytes",
	"rx_mcast_bytes",
	"rx_bcast_bytes",
};

static const char * const bnxt_ring_tx_stats_str[] = {
	"tx_ucast_packets",
	"tx_mcast_packets",
	"tx_bcast_packets",
	"tx_errors",
	"tx_discards",
	"tx_ucast_bytes",
	"tx_mcast_bytes",
	"tx_bcast_bytes",
};

static const char * const bnxt_ring_tpa_stats_str[] = {
	"tpa_packets",
	"tpa_bytes",
	"tpa_events",
	"tpa_aborts",
};

static const char * const bnxt_ring_tpa2_stats_str[] = {
	"rx_tpa_eligible_pkt",
	"rx_tpa_eligible_bytes",
	"rx_tpa_pkt",
	"rx_tpa_bytes",
	"rx_tpa_errors",
	"rx_tpa_events",
};

static const char * const bnxt_rx_sw_stats_str[] = {
	"rx_l4_csum_errors",
	"rx_resets",
	"rx_buf_errors",
};

static const char * const bnxt_cmn_sw_stats_str[] = {
	"missed_irqs",
};

#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_RX_STATS_EXT_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)			\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),	\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES	\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)

#define BNXT_TX_STATS_EXT_COS_ENTRIES	\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES	\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),	\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),	\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),	\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),	\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),	\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),	\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),	\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)

#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)

enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
};

static struct {
	u64 counter;
	char string[ETH_GSTRING_LEN];
} bnxt_sw_func_stats[] = {
	{0, "rx_total_discard_pkts"},
	{0, "tx_total_discard_pkts"},
};

#define NUM_RING_RX_SW_STATS	ARRAY_SIZE(bnxt_rx_sw_stats_str)
#define NUM_RING_CMN_SW_STATS	ARRAY_SIZE(bnxt_cmn_sw_stats_str)
#define NUM_RING_RX_HW_STATS	ARRAY_SIZE(bnxt_ring_rx_stats_str)
#define NUM_RING_TX_HW_STATS	ARRAY_SIZE(bnxt_ring_tx_stats_str)

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

#define BNXT_NUM_SW_FUNC_STATS	ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS	ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))

static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
	if (BNXT_SUPPORTS_TPA(bp)) {
		if (bp->max_tpa_v2) {
			if (BNXT_CHIP_P5_THOR(bp))
				return BNXT_NUM_TPA_RING_STATS_P5;
			return BNXT_NUM_TPA_RING_STATS_P5_SR2;
		}
		return BNXT_NUM_TPA_RING_STATS;
	}
	return 0;
}

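/* Per-ring totals: every RX ring contributes the HW RX, SW RX, and TPA
 * counters; every TX ring contributes the HW TX counters; and every
 * completion ring contributes the common SW counters.
 */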
static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
	int rx, tx, cmn;

	rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
	     bnxt_get_num_tpa_ring_stats(bp);
	tx = NUM_RING_TX_HW_STATS;
	cmn = NUM_RING_CMN_SW_STATS;
	return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
	       cmn * bp->cp_nr_rings;
}

static int bnxt_get_num_stats(struct bnxt *bp)
{
	int num_stats = bnxt_get_num_ring_stats(bp);

	num_stats += BNXT_NUM_SW_FUNC_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS)
		num_stats += BNXT_NUM_PORT_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		num_stats += bp->fw_rx_stats_ext_size +
			     bp->fw_tx_stats_ext_size;
		if (bp->pri2cos_valid)
			num_stats += BNXT_NUM_STATS_PRI;
	}

	return num_stats;
}

static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return bnxt_get_num_stats(bp);
	case ETH_SS_TEST:
		if (!bp->num_tests)
			return -EOPNOTSUPP;
		return bp->num_tests;
	default:
		return -EOPNOTSUPP;
	}
}

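/* Ring numbering: with BNXT_FLAG_SHARED_RINGS, ring i carries both RX and
 * TX traffic, so the TX range starts at 0; otherwise the TX rings are
 * numbered after all of the RX rings.
 */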
static bool is_rx_ring(struct bnxt *bp, int ring_num)
{
	return ring_num < bp->rx_nr_rings;
}

static bool is_tx_ring(struct bnxt *bp, int ring_num)
{
	int tx_base = 0;

	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
		tx_base = bp->rx_nr_rings;

	if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
		return true;
	return false;
}

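/* Fills the ethtool stats buffer.  The counter order here must stay in
 * lockstep with the string order emitted by bnxt_get_strings() below, or
 * user space will mislabel every counter.
 */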
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	u32 i, j = 0;
	struct bnxt *bp = netdev_priv(dev);
	u32 tpa_stats;

	if (!bp->bnapi) {
		j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
		goto skip_ring_stats;
	}

	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
		bnxt_sw_func_stats[i].counter = 0;

	tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		u64 *sw_stats = cpr->stats.sw_stats;
		u64 *sw;
		int k;

		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
				buf[j] = sw_stats[k];
		}
		if (is_tx_ring(bp, i)) {
			k = NUM_RING_RX_HW_STATS;
			for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
			     j++, k++)
				buf[j] = sw_stats[k];
		}
		if (!tpa_stats || !is_rx_ring(bp, i))
			goto skip_tpa_ring_stats;

		k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
		for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
			   tpa_stats; j++, k++)
			buf[j] = sw_stats[k];

skip_tpa_ring_stats:
		sw = (u64 *)&cpr->sw_stats.rx;
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
				buf[j] = sw[k];
		}

		sw = (u64 *)&cpr->sw_stats.cmn;
		for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
			buf[j] = sw[k];

		bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
			BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
		bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
			BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
	}

	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
		buf[j] = bnxt_sw_func_stats[i].counter;

skip_ring_stats:
	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		u64 *port_stats = bp->port_stats.sw_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
			buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
		u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;

		for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
			buf[j] = *(rx_port_stats_ext +
				   bnxt_port_stats_ext_arr[i].offset);
		}
		for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
			buf[j] = *(tx_port_stats_ext +
				   bnxt_tx_port_stats_ext_arr[i].offset);
		}
		if (bp->pri2cos_valid) {
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
		}
	}
}

static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	static const char * const *str;
	u32 i, j, num_str;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < bp->cp_nr_rings; i++) {
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_rx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			if (is_tx_ring(bp, i)) {
				num_str = NUM_RING_TX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_tx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = bnxt_get_num_tpa_ring_stats(bp);
			if (!num_str || !is_rx_ring(bp, i))
				goto skip_tpa_stats;

			if (bp->max_tpa_v2)
				str = bnxt_ring_tpa2_stats_str;
			else
				str = bnxt_ring_tpa_stats_str;

			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i, str[j]);
				buf += ETH_GSTRING_LEN;
			}
skip_tpa_stats:
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_SW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_rx_sw_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = NUM_RING_CMN_SW_STATS;
			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i,
					bnxt_cmn_sw_stats_str[j]);
				buf += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
			strcpy(buf, bnxt_sw_func_stats[i].string);
			buf += ETH_GSTRING_LEN;
		}

		if (bp->flags & BNXT_FLAG_PORT_STATS) {
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				strcpy(buf, bnxt_port_stats_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
		}
		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
				strcpy(buf, bnxt_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
				strcpy(buf,
				       bnxt_tx_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
			}
		}
		break;
	case ETH_SS_TEST:
		if (bp->num_tests)
			memcpy(buf, bp->test_info->string,
			       bp->num_tests * ETH_GSTRING_LEN);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}

static void bnxt_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnxt *bp = netdev_priv(dev);

	ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
	ering->tx_pending = bp->tx_ring_size;
}

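/* tx_pending must exceed MAX_SKB_FRAGS so that a maximally fragmented skb
 * can always fit in the TX ring.  Illustrative usage (not part of this
 * file):
 *
 *	ethtool -G eth0 rx 2048 tx 1024
 */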
static int bnxt_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct bnxt *bp = netdev_priv(dev);

	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS))
		return -EINVAL;

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}

static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs, tx_grps;

	/* Get the most up-to-date max_tx_sch_inputs. */
	if (netif_running(dev) && BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	tcs = netdev_get_num_tc(dev);
	tx_grps = max(tcs, 1);
	if (bp->tx_nr_rings_xdp)
		tx_grps++;
	max_tx_rings /= tx_grps;
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}

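/* With XDP active only combined channels are accepted, since the XDP TX
 * rings are sized from the requested RX ring count.  Illustrative usage
 * (not part of this file):
 *
 *	ethtool -L eth0 combined 8
 */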
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;

	if (channel->other_count)
		return -EINVAL;

	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = netdev_get_num_tc(dev);

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		tx_xdp = req_rx_rings;
	}
	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
	    (dev->priv_flags & IFF_RXFH_CONFIGURED)) {
		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * before PF unload
			 */
		}
		rc = bnxt_close_nic(bp, true, false);
		if (rc) {
			netdev_err(bp->dev, "Set channel failure rc :%x\n",
				   rc);
			return rc;
		}
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;

	/* Re-evaluate netdev features (e.g. hardware GRO) for the new
	 * ring counts.
	 */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * to renable
			 */
		}
	} else {
		rc = bnxt_reserve_rings(bp, true);
	}

	return rc;
}

#ifdef CONFIG_RFS_ACCEL
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int i, j = 0;

	cmd->data = bp->ntp_fltr_count;
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (j == cmd->rule_cnt)
				break;
			rule_locs[j++] = fltr->sw_id;
		}
		rcu_read_unlock();
		if (j == cmd->rule_cnt)
			break;
	}
	cmd->rule_cnt = j;
	return 0;
}

static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_ntuple_filter *fltr;
	struct flow_keys *fkeys;
	int i, rc = -EINVAL;

	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
		return rc;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->sw_id == fs->location)
				goto fltr_found;
		}
		rcu_read_unlock();
	}
	return rc;

fltr_found:
	fkeys = &fltr->fkeys;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V4_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V4_FLOW;
		else
			goto fltr_err;

		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
	} else {
		int i;

		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V6_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V6_FLOW;
		else
			goto fltr_err;

		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
			fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
		}
		fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
	}

	fs->ring_cookie = fltr->rxq;
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}
#endif

static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}

#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)

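/* Only all-or-nothing hash tuples are accepted per flow type: a 4-tuple
 * (addresses plus L4 ports), a 2-tuple (addresses only), or no hashing.
 * Illustrative usage (not part of this file):
 *
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn
 */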
static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	u32 rss_hash_cfg = bp->rss_hash_cfg;
	int tuple, rc = 0;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (tuple == 4) {
		return -EINVAL;
	}

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	switch (cmd->cmd) {
#ifdef CONFIG_RFS_ACCEL
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->rx_nr_rings;
		break;

	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->ntp_fltr_count;
		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
		break;

	case ETHTOOL_GRXCLSRLALL:
		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
		break;

	case ETHTOOL_GRXCLSRULE:
		rc = bnxt_grxclsrule(bp, cmd);
		break;
#endif

	case ETHTOOL_GRXFH:
		rc = bnxt_grxfh(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		rc = bnxt_srxfh(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}
	return rc;
}

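/* On P5 (57500-series) chips the RSS indirection table is sized from the
 * RX ring count; older chips use a fixed hardware table size.
 */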
u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
	return HW_HASH_INDEX_SIZE;
}

static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
{
	return HW_HASH_KEY_SIZE;
}

static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			 u8 *hfunc)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vnic_info *vnic;
	u32 i, tbl_size;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!bp->vnic_info)
		return 0;

	vnic = &bp->vnic_info[0];
	if (indir && bp->rss_indir_tbl) {
		tbl_size = bnxt_get_rxfh_indir_size(dev);
		for (i = 0; i < tbl_size; i++)
			indir[i] = bp->rss_indir_tbl[i];
	}

	if (key && vnic->rss_hash_key)
		memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);

	return 0;
}

static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (hfunc && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (key)
		return -EOPNOTSUPP;

	if (indir) {
		u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);

		for (i = 0; i < tbl_size; i++)
			bp->rss_indir_tbl[i] = indir[i];
		pad = bp->rss_indir_tbl_entries - tbl_size;
		if (pad)
			memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
	}

	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct bnxt *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = bnxt_get_num_stats(bp);
	info->testinfo_len = bp->num_tests;
	/* TODO CHIMP_FW: eeprom dump details */
	info->eedump_len = 0;
	/* TODO CHIMP FW: reg dump details */
	info->regdump_len = 0;
}

static int bnxt_get_regs_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	int reg_len;

	if (!BNXT_PF(bp))
		return -EOPNOTSUPP;

	reg_len = BNXT_PXP_REG_LEN;

	if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
		reg_len += sizeof(struct pcie_ctx_hw_stats);

	return reg_len;
}

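/* Register dump layout: BNXT_PXP_REG_LEN bytes of PXP registers, optionally
 * followed by the firmware's PCIe statistics block; regs->version is set to
 * 1 only when the PCIe stats are appended.
 */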
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *_p)
{
	struct pcie_ctx_hw_stats *hw_pcie_stats;
	struct hwrm_pcie_qstats_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	dma_addr_t hw_pcie_stats_addr;
	int rc;

	regs->version = 0;
	bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);

	if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
		return;

	hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev,
					   sizeof(*hw_pcie_stats),
					   &hw_pcie_stats_addr, GFP_KERNEL);
	if (!hw_pcie_stats)
		return;

	regs->version = 1;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
	req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
	req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		__le64 *src = (__le64 *)hw_pcie_stats;
		u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
		int i;

		for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
			dst[i] = le64_to_cpu(src[i]);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats,
			  hw_pcie_stats_addr);
}

static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
	if (bp->flags & BNXT_FLAG_WOL_CAP) {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
	}
}

static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
			return -EINVAL;
		if (!bp->wol) {
			if (bnxt_hwrm_alloc_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 1;
		}
	} else {
		if (bp->wol) {
			if (bnxt_hwrm_free_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 0;
		}
	}
	return 0;
}

u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
	u32 speed_mask = 0;

	/* TODO: support 25GB, 40GB, 50GB with different cable type */
	/* set the advertised speeds to the lowest speed if not specified */
	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
		speed_mask |= ADVERTISED_100baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
		speed_mask |= ADVERTISED_1000baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
		speed_mask |= ADVERTISED_2500baseX_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
		speed_mask |= ADVERTISED_10000baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
		speed_mask |= ADVERTISED_40000baseCR4_Full;

	if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
		speed_mask |= ADVERTISED_Pause;
	else if (fw_pause & BNXT_LINK_PAUSE_TX)
		speed_mask |= ADVERTISED_Asym_Pause;
	else if (fw_pause & BNXT_LINK_PAUSE_RX)
		speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;

	return speed_mask;
}

#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
{									\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     1000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     10000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     25000baseCR_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     40000baseCR4_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     50000baseCR2_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100000baseCR4_Full);\
	if ((fw_pause) & BNXT_LINK_PAUSE_RX) {				\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Pause);		\
		if (!((fw_pause) & BNXT_LINK_PAUSE_TX))			\
			ethtool_link_ksettings_add_link_mode(		\
					lk_ksettings, name, Asym_Pause);\
	} else if ((fw_pause) & BNXT_LINK_PAUSE_TX) {			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Asym_Pause);	\
	}								\
}

#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name)		\
{									\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB;			\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  10000baseT_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  25000baseCR_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  40000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  50000baseCR2_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB;		\
}

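/* PAM4 signaling doubles the per-lane rate relative to NRZ (e.g. 50G over
 * one lane, 100G over two, 200G over four), so the same nominal speed can
 * be reported through either the NRZ or the PAM4 speed mask.
 */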
#define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name)	\
{									\
	if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB)		\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     50000baseCR_Full);	\
	if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB)		\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100000baseCR2_Full);\
	if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB)		\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     200000baseCR4_Full);\
}

#define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name)	\
{									\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  50000baseCR_Full))	\
		(fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100000baseCR2_Full))	\
		(fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  200000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB;		\
}

static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fec_cfg = link_info->fec_cfg;

	if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 lk_ksettings->link_modes.advertising);
		return;
	}
	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 lk_ksettings->link_modes.advertising);
	if (fec_cfg & BNXT_FEC_ENC_RS)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 lk_ksettings->link_modes.advertising);
	if (fec_cfg & BNXT_FEC_ENC_LLRS)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 lk_ksettings->link_modes.advertising);
}

static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->advertising;
	u8 fw_pause = 0;

	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		fw_pause = link_info->auto_pause_setting;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
	fw_speeds = link_info->advertising_pam4;
	BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising);
	bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
}

static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->lp_auto_link_speeds;
	u8 fw_pause = 0;

	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		fw_pause = link_info->lp_pause;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
				lp_advertising);
	fw_speeds = link_info->lp_auto_pam4_link_speeds;
	BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising);
}

static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fec_cfg = link_info->fec_cfg;

	if (fec_cfg & BNXT_FEC_NONE) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 lk_ksettings->link_modes.supported);
		return;
	}
	if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 lk_ksettings->link_modes.supported);
	if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 lk_ksettings->link_modes.supported);
	if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 lk_ksettings->link_modes.supported);
}

static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->support_speeds;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
	fw_speeds = link_info->support_pam4_speeds;
	BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);

	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
					     Asym_Pause);

	if (link_info->support_auto_speeds ||
	    link_info->support_pam4_auto_speeds)
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     Autoneg);
	bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
}

u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
	switch (fw_link_speed) {
	case BNXT_LINK_SPEED_100MB:
		return SPEED_100;
	case BNXT_LINK_SPEED_1GB:
		return SPEED_1000;
	case BNXT_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case BNXT_LINK_SPEED_10GB:
		return SPEED_10000;
	case BNXT_LINK_SPEED_20GB:
		return SPEED_20000;
	case BNXT_LINK_SPEED_25GB:
		return SPEED_25000;
	case BNXT_LINK_SPEED_40GB:
		return SPEED_40000;
	case BNXT_LINK_SPEED_50GB:
		return SPEED_50000;
	case BNXT_LINK_SPEED_100GB:
		return SPEED_100000;
	default:
		return SPEED_UNKNOWN;
	}
}

static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	struct ethtool_link_settings *base = &lk_ksettings->base;
	u32 ethtool_speed;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	mutex_lock(&bp->link_lock);
	bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
		ethtool_link_ksettings_add_link_mode(lk_ksettings,
						     advertising, Autoneg);
		base->autoneg = AUTONEG_ENABLE;
		base->duplex = DUPLEX_UNKNOWN;
		if (link_info->phy_link_status == BNXT_LINK_LINK) {
			bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
			if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
				base->duplex = DUPLEX_FULL;
			else
				base->duplex = DUPLEX_HALF;
		}
		ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
	} else {
		base->autoneg = AUTONEG_DISABLE;
		ethtool_speed =
			bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
	}
	base->speed = ethtool_speed;

	base->port = PORT_NONE;
	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
		base->port = PORT_TP;
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     TP);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     TP);
	} else {
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     FIBRE);

		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
			base->port = PORT_DA;
		else if (link_info->media_type ==
			 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}

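/* Maps a forced ethtool speed to a firmware speed constant, preferring NRZ
 * signaling and falling back to PAM4 when only PAM4 supports the requested
 * speed.  Illustrative usage (not part of this file):
 *
 *	ethtool -s eth0 speed 25000 autoneg off duplex full
 */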
static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_pam4_spds = link_info->support_pam4_speeds;
	u16 support_spds = link_info->support_speeds;
	u8 sig_mode = BNXT_SIG_MODE_NRZ;
	u16 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
		break;
	case SPEED_10000:
		if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
		break;
	case SPEED_20000:
		if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
		break;
	case SPEED_25000:
		if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
		break;
	case SPEED_40000:
		if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
		break;
	case SPEED_50000:
		if (support_spds & BNXT_LINK_SPEED_MSK_50GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	case SPEED_100000:
		if (support_spds & BNXT_LINK_SPEED_MSK_100GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	case SPEED_200000:
		if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	}

	if (!fw_speed) {
		netdev_err(dev, "unsupported speed!\n");
		return -EINVAL;
	}

	if (link_info->req_link_speed == fw_speed &&
	    link_info->req_signal_mode == sig_mode &&
	    link_info->autoneg == 0)
		return -EALREADY;

	link_info->req_link_speed = fw_speed;
	link_info->req_signal_mode = sig_mode;
	link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
	link_info->autoneg = 0;
	link_info->advertising = 0;
	link_info->advertising_pam4 = 0;

	return 0;
}

u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
{
	u16 fw_speed_mask = 0;

	/* only support autoneg at speed 100, 1000, and 10000 */
	if (advertising & (ADVERTISED_100baseT_Full |
			   ADVERTISED_100baseT_Half)) {
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
	}
	if (advertising & (ADVERTISED_1000baseT_Full |
			   ADVERTISED_1000baseT_Half)) {
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
	}
	if (advertising & ADVERTISED_10000baseT_Full)
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;

	if (advertising & ADVERTISED_40000baseCR4_Full)
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;

	return fw_speed_mask;
}

static int bnxt_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u32 speed;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		link_info->advertising = 0;
		link_info->advertising_pam4 = 0;
		BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings,
					advertising);
		BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4,
					     lk_ksettings, advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		if (!link_info->advertising && !link_info->advertising_pam4) {
			link_info->advertising = link_info->support_auto_speeds;
			link_info->advertising_pam4 =
				link_info->support_pam4_auto_speeds;
		}
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		set_pause = true;
	} else {
		u8 phy_type = link_info->phy_type;

		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		rc = bnxt_force_link_speed(dev, speed);
		if (rc) {
			if (rc == -EALREADY)
				rc = 0;
			goto set_setting_exit;
		}
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}

static int bnxt_get_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fec)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u8 active_fec;
	u16 fec_cfg;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	active_fec = link_info->active_fec_sig_mode &
		     PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
	if (fec_cfg & BNXT_FEC_NONE) {
		fec->fec = ETHTOOL_FEC_NONE;
		fec->active_fec = ETHTOOL_FEC_NONE;
		return 0;
	}
	if (fec_cfg & BNXT_FEC_AUTONEG)
		fec->fec |= ETHTOOL_FEC_AUTO;
	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
		fec->fec |= ETHTOOL_FEC_BASER;
	if (fec_cfg & BNXT_FEC_ENC_RS)
		fec->fec |= ETHTOOL_FEC_RS;
	if (fec_cfg & BNXT_FEC_ENC_LLRS)
		fec->fec |= ETHTOOL_FEC_LLRS;

	switch (active_fec) {
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_BASER;
		break;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_RS;
		break;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_LLRS;
		break;
	}
	return 0;
}

static void bnxt_get_fec_stats(struct net_device *dev,
			       struct ethtool_fec_stats *fec_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return;

	rx = bp->rx_port_stats_ext.sw_stats;
	fec_stats->corrected_bits.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
}

static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
					 u32 fec)
{
	u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;

	if (fec & ETHTOOL_FEC_BASER)
		fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
	else if (fec & ETHTOOL_FEC_RS)
		fw_fec |= BNXT_FEC_RS_ON(link_info);
	else if (fec & ETHTOOL_FEC_LLRS)
		fw_fec |= BNXT_FEC_LLRS_ON;
	return fw_fec;
}

static int bnxt_set_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fecparam)
{
	struct hwrm_port_phy_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u32 new_cfg, fec = fecparam->fec;
	u16 fec_cfg;
	int rc;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	if (fec_cfg & BNXT_FEC_NONE)
		return -EOPNOTSUPP;

	if (fec & ETHTOOL_FEC_OFF) {
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
			  BNXT_FEC_ALL_OFF(link_info);
		goto apply_fec;
	}
	if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
	    ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
	    ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
	    ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
		return -EINVAL;

	if (fec & ETHTOOL_FEC_AUTO) {
1992 if (!link_info->autoneg)
1993 return -EINVAL;
1994 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
1995 } else {
1996 new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
1997 }
1998
1999apply_fec:
2000 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
2001 req.flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
2002 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	/* update current settings */
2004 if (!rc) {
2005 mutex_lock(&bp->link_lock);
2006 bnxt_update_link(bp, false);
2007 mutex_unlock(&bp->link_lock);
2008 }
2009 return rc;
2010}
2011
2012static void bnxt_get_pauseparam(struct net_device *dev,
2013 struct ethtool_pauseparam *epause)
2014{
2015 struct bnxt *bp = netdev_priv(dev);
2016 struct bnxt_link_info *link_info = &bp->link_info;
2017
2018 if (BNXT_VF(bp))
2019 return;
2020 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
2021 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
2022 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
2023}
2024
2025static void bnxt_get_pause_stats(struct net_device *dev,
2026 struct ethtool_pause_stats *epstat)
2027{
2028 struct bnxt *bp = netdev_priv(dev);
2029 u64 *rx, *tx;
2030
2031 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
2032 return;
2033
2034 rx = bp->port_stats.sw_stats;
2035 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
2036
2037 epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
2038 epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
2039}
2040
2041static int bnxt_set_pauseparam(struct net_device *dev,
2042 struct ethtool_pauseparam *epause)
2043{
2044 int rc = 0;
2045 struct bnxt *bp = netdev_priv(dev);
2046 struct bnxt_link_info *link_info = &bp->link_info;
2047
2048 if (!BNXT_PHY_CFG_ABLE(bp))
2049 return -EOPNOTSUPP;
2050
2051 mutex_lock(&bp->link_lock);
2052 if (epause->autoneg) {
2053 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
2054 rc = -EINVAL;
2055 goto pause_exit;
2056 }
2057
2058 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
2059 if (bp->hwrm_spec_code >= 0x10201)
2060 link_info->req_flow_ctrl =
2061 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
2062 } else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
2066 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
2067 link_info->force_link_chng = true;
2068 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
2069 link_info->req_flow_ctrl = 0;
2070 }
2071 if (epause->rx_pause)
2072 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
2073
2074 if (epause->tx_pause)
2075 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
2076
2077 if (netif_running(dev))
2078 rc = bnxt_hwrm_set_pause(bp);
2079
2080pause_exit:
2081 mutex_unlock(&bp->link_lock);
2082 return rc;
2083}
2084
2085static u32 bnxt_get_link(struct net_device *dev)
2086{
2087 struct bnxt *bp = netdev_priv(dev);
2088
	/* report the link state cached from the last PHY query or event */
2090 return bp->link_info.link_up;
2091}
2092
2093int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
2094 struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
2095{
2096 struct hwrm_nvm_get_dev_info_output *resp = bp->hwrm_cmd_resp_addr;
2097 struct hwrm_nvm_get_dev_info_input req = {0};
2098 int rc;
2099
2100 if (BNXT_VF(bp))
2101 return -EOPNOTSUPP;
2102
2103 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1);
2104 mutex_lock(&bp->hwrm_cmd_lock);
2105 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2106 if (!rc)
2107 memcpy(nvm_dev_info, resp, sizeof(*resp));
2108 mutex_unlock(&bp->hwrm_cmd_lock);
2109 return rc;
2110}
2111
2112static void bnxt_print_admin_err(struct bnxt *bp)
2113{
2114 netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
2115}
2116
2117static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
2118 u16 ext, u16 *index, u32 *item_length,
2119 u32 *data_length);
2120
2121static int __bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
2122 u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
2123 u32 dir_item_len, const u8 *data,
2124 size_t data_len)
2125{
2126 struct bnxt *bp = netdev_priv(dev);
2127 int rc;
2128 struct hwrm_nvm_write_input req = {0};
2129 dma_addr_t dma_handle;
2130 u8 *kmem = NULL;
2131
2132 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
2133
2134 req.dir_type = cpu_to_le16(dir_type);
2135 req.dir_ordinal = cpu_to_le16(dir_ordinal);
2136 req.dir_ext = cpu_to_le16(dir_ext);
2137 req.dir_attr = cpu_to_le16(dir_attr);
2138 req.dir_item_length = cpu_to_le32(dir_item_len);
2139 if (data_len && data) {
2140 req.dir_data_length = cpu_to_le32(data_len);
2141
2142 kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
2143 GFP_KERNEL);
2144 if (!kmem)
2145 return -ENOMEM;
2146
2147 memcpy(kmem, data, data_len);
2148 req.host_src_addr = cpu_to_le64(dma_handle);
2149 }
2150
2151 rc = _hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
2152 if (kmem)
2153 dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
2154
2155 if (rc == -EACCES)
2156 bnxt_print_admin_err(bp);
2157 return rc;
2158}
2159
2160static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
2161 u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
2162 const u8 *data, size_t data_len)
2163{
2164 struct bnxt *bp = netdev_priv(dev);
2165 int rc;
2166
2167 mutex_lock(&bp->hwrm_cmd_lock);
2168 rc = __bnxt_flash_nvram(dev, dir_type, dir_ordinal, dir_ext, dir_attr,
2169 0, data, data_len);
2170 mutex_unlock(&bp->hwrm_cmd_lock);
2171 return rc;
2172}
2173
2174static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
2175 u8 self_reset, u8 flags)
2176{
2177 struct hwrm_fw_reset_input req = {0};
2178 struct bnxt *bp = netdev_priv(dev);
2179 int rc;
2180
2181 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
2182
2183 req.embedded_proc_type = proc_type;
2184 req.selfrst_status = self_reset;
2185 req.flags = flags;
2186
2187 if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
2188 rc = hwrm_send_message_silent(bp, &req, sizeof(req),
2189 HWRM_CMD_TIMEOUT);
2190 } else {
2191 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2192 if (rc == -EACCES)
2193 bnxt_print_admin_err(bp);
2194 }
2195 return rc;
2196}
2197
2198static int bnxt_firmware_reset(struct net_device *dev,
2199 enum bnxt_nvm_directory_type dir_type)
2200{
2201 u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
2202 u8 proc_type, flags = 0;
2203
	/* Map the NVM directory entry type to the embedded processor that
	 * must be reset after flashing.
	 */
2206 switch (dir_type) {
2207 case BNX_DIR_TYPE_CHIMP_PATCH:
2208 case BNX_DIR_TYPE_BOOTCODE:
2209 case BNX_DIR_TYPE_BOOTCODE_2:
2210 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
2212 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
2213 break;
2214 case BNX_DIR_TYPE_APE_FW:
2215 case BNX_DIR_TYPE_APE_PATCH:
2216 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
2218 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
2219 break;
2220 case BNX_DIR_TYPE_KONG_FW:
2221 case BNX_DIR_TYPE_KONG_PATCH:
2222 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
2223 break;
2224 case BNX_DIR_TYPE_BONO_FW:
2225 case BNX_DIR_TYPE_BONO_PATCH:
2226 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
2227 break;
2228 default:
2229 return -EINVAL;
2230 }
2231
2232 return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
2233}
2234
2235static int bnxt_firmware_reset_chip(struct net_device *dev)
2236{
2237 struct bnxt *bp = netdev_priv(dev);
2238 u8 flags = 0;
2239
2240 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
2241 flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
2242
2243 return bnxt_hwrm_firmware_reset(dev,
2244 FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
2245 FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
2246 flags);
2247}
2248
2249static int bnxt_firmware_reset_ap(struct net_device *dev)
2250{
2251 return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
2252 FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
2253 0);
2254}
2255
2256static int bnxt_flash_firmware(struct net_device *dev,
2257 u16 dir_type,
2258 const u8 *fw_data,
2259 size_t fw_size)
2260{
2261 int rc = 0;
2262 u16 code_type;
2263 u32 stored_crc;
2264 u32 calculated_crc;
2265 struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
2266
2267 switch (dir_type) {
2268 case BNX_DIR_TYPE_BOOTCODE:
2269 case BNX_DIR_TYPE_BOOTCODE_2:
2270 code_type = CODE_BOOT;
2271 break;
2272 case BNX_DIR_TYPE_CHIMP_PATCH:
2273 code_type = CODE_CHIMP_PATCH;
2274 break;
2275 case BNX_DIR_TYPE_APE_FW:
2276 code_type = CODE_MCTP_PASSTHRU;
2277 break;
2278 case BNX_DIR_TYPE_APE_PATCH:
2279 code_type = CODE_APE_PATCH;
2280 break;
2281 case BNX_DIR_TYPE_KONG_FW:
2282 code_type = CODE_KONG_FW;
2283 break;
2284 case BNX_DIR_TYPE_KONG_PATCH:
2285 code_type = CODE_KONG_PATCH;
2286 break;
2287 case BNX_DIR_TYPE_BONO_FW:
2288 code_type = CODE_BONO_FW;
2289 break;
2290 case BNX_DIR_TYPE_BONO_PATCH:
2291 code_type = CODE_BONO_PATCH;
2292 break;
2293 default:
2294 netdev_err(dev, "Unsupported directory entry type: %u\n",
2295 dir_type);
2296 return -EINVAL;
2297 }
2298 if (fw_size < sizeof(struct bnxt_fw_header)) {
2299 netdev_err(dev, "Invalid firmware file size: %u\n",
2300 (unsigned int)fw_size);
2301 return -EINVAL;
2302 }
2303 if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
2304 netdev_err(dev, "Invalid firmware signature: %08X\n",
2305 le32_to_cpu(header->signature));
2306 return -EINVAL;
2307 }
2308 if (header->code_type != code_type) {
2309 netdev_err(dev, "Expected firmware type: %d, read: %d\n",
2310 code_type, header->code_type);
2311 return -EINVAL;
2312 }
2313 if (header->device != DEVICE_CUMULUS_FAMILY) {
2314 netdev_err(dev, "Expected firmware device family %d, read: %d\n",
2315 DEVICE_CUMULUS_FAMILY, header->device);
2316 return -EINVAL;
2317 }
2318
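	/* Confirm the CRC32 checksum of the file: */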
2319 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
2320 sizeof(stored_crc)));
2321 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
2322 if (calculated_crc != stored_crc) {
2323 netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
2324 (unsigned long)stored_crc,
2325 (unsigned long)calculated_crc);
2326 return -EINVAL;
2327 }
2328 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2329 0, 0, fw_data, fw_size);
2330 if (rc == 0)
2331 rc = bnxt_firmware_reset(dev, dir_type);
2332
2333 return rc;
2334}
2335
2336static int bnxt_flash_microcode(struct net_device *dev,
2337 u16 dir_type,
2338 const u8 *fw_data,
2339 size_t fw_size)
2340{
2341 struct bnxt_ucode_trailer *trailer;
2342 u32 calculated_crc;
2343 u32 stored_crc;
2344 int rc = 0;
2345
2346 if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
2347 netdev_err(dev, "Invalid microcode file size: %u\n",
2348 (unsigned int)fw_size);
2349 return -EINVAL;
2350 }
2351 trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
2352 sizeof(*trailer)));
2353 if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
2354 netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
2355 le32_to_cpu(trailer->sig));
2356 return -EINVAL;
2357 }
2358 if (le16_to_cpu(trailer->dir_type) != dir_type) {
2359 netdev_err(dev, "Expected microcode type: %d, read: %d\n",
2360 dir_type, le16_to_cpu(trailer->dir_type));
2361 return -EINVAL;
2362 }
2363 if (le16_to_cpu(trailer->trailer_length) <
2364 sizeof(struct bnxt_ucode_trailer)) {
2365 netdev_err(dev, "Invalid microcode trailer length: %d\n",
2366 le16_to_cpu(trailer->trailer_length));
2367 return -EINVAL;
2368 }
2369
	/* Confirm the CRC32 checksum of the file: */
2371 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
2372 sizeof(stored_crc)));
2373 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
2374 if (calculated_crc != stored_crc) {
2375 netdev_err(dev,
2376 "CRC32 (%08lX) does not match calculated: %08lX\n",
2377 (unsigned long)stored_crc,
2378 (unsigned long)calculated_crc);
2379 return -EINVAL;
2380 }
2381 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2382 0, 0, fw_data, fw_size);
2383
2384 return rc;
2385}
2386
2387static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
2388{
2389 switch (dir_type) {
2390 case BNX_DIR_TYPE_CHIMP_PATCH:
2391 case BNX_DIR_TYPE_BOOTCODE:
2392 case BNX_DIR_TYPE_BOOTCODE_2:
2393 case BNX_DIR_TYPE_APE_FW:
2394 case BNX_DIR_TYPE_APE_PATCH:
2395 case BNX_DIR_TYPE_KONG_FW:
2396 case BNX_DIR_TYPE_KONG_PATCH:
2397 case BNX_DIR_TYPE_BONO_FW:
2398 case BNX_DIR_TYPE_BONO_PATCH:
2399 return true;
2400 }
2401
2402 return false;
2403}
2404
2405static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
2406{
2407 switch (dir_type) {
2408 case BNX_DIR_TYPE_AVS:
2409 case BNX_DIR_TYPE_EXP_ROM_MBA:
2410 case BNX_DIR_TYPE_PCIE:
2411 case BNX_DIR_TYPE_TSCF_UCODE:
2412 case BNX_DIR_TYPE_EXT_PHY:
2413 case BNX_DIR_TYPE_CCM:
2414 case BNX_DIR_TYPE_ISCSI_BOOT:
2415 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
2416 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
2417 return true;
2418 }
2419
2420 return false;
2421}
2422
2423static bool bnxt_dir_type_is_executable(u16 dir_type)
2424{
2425 return bnxt_dir_type_is_ape_bin_format(dir_type) ||
2426 bnxt_dir_type_is_other_exec_format(dir_type);
2427}
2428
2429static int bnxt_flash_firmware_from_file(struct net_device *dev,
2430 u16 dir_type,
2431 const char *filename)
2432{
2433 const struct firmware *fw;
2434 int rc;
2435
2436 rc = request_firmware(&fw, filename, &dev->dev);
2437 if (rc != 0) {
2438 netdev_err(dev, "Error %d requesting firmware file: %s\n",
2439 rc, filename);
2440 return rc;
2441 }
2442 if (bnxt_dir_type_is_ape_bin_format(dir_type))
2443 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
2444 else if (bnxt_dir_type_is_other_exec_format(dir_type))
2445 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
2446 else
2447 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2448 0, 0, fw->data, fw->size);
2449 release_firmware(fw);
2450 return rc;
2451}
2452
2453#define BNXT_PKG_DMA_SIZE 0x40000
2454#define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
2455#define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
2456
2457int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
2458 u32 install_type)
2459{
2460 struct hwrm_nvm_install_update_input install = {0};
2461 struct hwrm_nvm_install_update_output resp = {0};
2462 struct hwrm_nvm_modify_input modify = {0};
2463 struct bnxt *bp = netdev_priv(dev);
2464 bool defrag_attempted = false;
2465 dma_addr_t dma_handle;
2466 u8 *kmem = NULL;
2467 u32 modify_len;
2468 u32 item_len;
2469 int rc = 0;
2470 u16 index;
2471
2472 bnxt_hwrm_fw_set_time(bp);
2473
2474 bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
2475
	/* Try allocating a large DMA buffer first.  Older fw will
	 * cause excessive NVRAM erases when using small blocks.
	 */
2479 modify_len = roundup_pow_of_two(fw->size);
2480 modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
2481 while (1) {
2482 kmem = dma_alloc_coherent(&bp->pdev->dev, modify_len,
2483 &dma_handle, GFP_KERNEL);
2484 if (!kmem && modify_len > PAGE_SIZE)
2485 modify_len /= 2;
2486 else
2487 break;
2488 }
2489 if (!kmem)
2490 return -ENOMEM;
2491
2492 modify.host_src_addr = cpu_to_le64(dma_handle);
2493
2494 bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
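	/* A zero lower 16 bits means the install type is carried in the
	 * upper 16 bits of the value passed down from ethtool.
	 */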
2495 if ((install_type & 0xffff) == 0)
2496 install_type >>= 16;
2497 install.install_type = cpu_to_le32(install_type);
2498
2499 do {
2500 u32 copied = 0, len = modify_len;
2501
2502 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
2503 BNX_DIR_ORDINAL_FIRST,
2504 BNX_DIR_EXT_NONE,
2505 &index, &item_len, NULL);
2506 if (rc) {
2507 netdev_err(dev, "PKG update area not created in nvram\n");
2508 break;
2509 }
2510 if (fw->size > item_len) {
2511 netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
2512 (unsigned long)fw->size);
2513 rc = -EFBIG;
2514 break;
2515 }
2516
2517 modify.dir_idx = cpu_to_le16(index);
2518
2519 if (fw->size > modify_len)
2520 modify.flags = BNXT_NVM_MORE_FLAG;
2521 while (copied < fw->size) {
2522 u32 balance = fw->size - copied;
2523
2524 if (balance <= modify_len) {
2525 len = balance;
2526 if (copied)
2527 modify.flags |= BNXT_NVM_LAST_FLAG;
2528 }
2529 memcpy(kmem, fw->data + copied, len);
2530 modify.len = cpu_to_le32(len);
2531 modify.offset = cpu_to_le32(copied);
2532 rc = hwrm_send_message(bp, &modify, sizeof(modify),
2533 FLASH_PACKAGE_TIMEOUT);
2534 if (rc)
2535 goto pkg_abort;
2536 copied += len;
2537 }
2538 mutex_lock(&bp->hwrm_cmd_lock);
2539 rc = _hwrm_send_message_silent(bp, &install, sizeof(install),
2540 INSTALL_PACKAGE_TIMEOUT);
2541 memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp));
2542
2543 if (defrag_attempted) {
			/* We have tried to defragment already in the previous
			 * iteration.  Return the result for INSTALL_UPDATE.
			 */
2547 mutex_unlock(&bp->hwrm_cmd_lock);
2548 break;
2549 }
2550
2551 if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
2552 NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
2553 install.flags =
2554 cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
2555
2556 rc = _hwrm_send_message_silent(bp, &install,
2557 sizeof(install),
2558 INSTALL_PACKAGE_TIMEOUT);
2559 memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp));
2560
2561 if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
2562 NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
				/* FW has cleared NVM area, driver will create
				 * UPDATE directory and try the flash again
				 */
2566 defrag_attempted = true;
2567 install.flags = 0;
2568 rc = __bnxt_flash_nvram(bp->dev,
2569 BNX_DIR_TYPE_UPDATE,
2570 BNX_DIR_ORDINAL_FIRST,
2571 0, 0, item_len, NULL,
2572 0);
2573 } else if (rc) {
2574 netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
2575 }
2576 } else if (rc) {
2577 netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
2578 }
2579 mutex_unlock(&bp->hwrm_cmd_lock);
2580 } while (defrag_attempted && !rc);
2581
2582pkg_abort:
2583 dma_free_coherent(&bp->pdev->dev, modify_len, kmem, dma_handle);
2584 if (resp.result) {
2585 netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
2586 (s8)resp.result, (int)resp.problem_item);
2587 rc = -ENOPKG;
2588 }
2589 if (rc == -EACCES)
2590 bnxt_print_admin_err(bp);
2591 return rc;
2592}
2593
2594static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
2595 u32 install_type)
2596{
2597 const struct firmware *fw;
2598 int rc;
2599
2600 rc = request_firmware(&fw, filename, &dev->dev);
2601 if (rc != 0) {
2602 netdev_err(dev, "PKG error %d requesting file: %s\n",
2603 rc, filename);
2604 return rc;
2605 }
2606
2607 rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type);
2608
2609 release_firmware(fw);
2610
2611 return rc;
2612}
2613
2614static int bnxt_flash_device(struct net_device *dev,
2615 struct ethtool_flash *flash)
2616{
2617 if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
2618 netdev_err(dev, "flashdev not supported from a virtual function\n");
2619 return -EINVAL;
2620 }
2621
2622 if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
2623 flash->region > 0xffff)
2624 return bnxt_flash_package_from_file(dev, flash->data,
2625 flash->region);
2626
2627 return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
2628}
2629
2630static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
2631{
2632 struct bnxt *bp = netdev_priv(dev);
2633 int rc;
2634 struct hwrm_nvm_get_dir_info_input req = {0};
2635 struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
2636
2637 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
2638
2639 mutex_lock(&bp->hwrm_cmd_lock);
2640 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2641 if (!rc) {
2642 *entries = le32_to_cpu(output->entries);
2643 *length = le32_to_cpu(output->entry_length);
2644 }
2645 mutex_unlock(&bp->hwrm_cmd_lock);
2646 return rc;
2647}
2648
2649static int bnxt_get_eeprom_len(struct net_device *dev)
2650{
2651 struct bnxt *bp = netdev_priv(dev);
2652
2653 if (BNXT_VF(bp))
2654 return 0;
2655
	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
2659 return -1;
2660}
2661
2662static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
2663{
2664 struct bnxt *bp = netdev_priv(dev);
2665 int rc;
2666 u32 dir_entries;
2667 u32 entry_length;
2668 u8 *buf;
2669 size_t buflen;
2670 dma_addr_t dma_handle;
2671 struct hwrm_nvm_get_dir_entries_input req = {0};
2672
2673 rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
2674 if (rc != 0)
2675 return rc;
2676
2677 if (!dir_entries || !entry_length)
2678 return -EIO;
2679
	/* Insert 2 bytes of directory info (count and size of entries) */
2681 if (len < 2)
2682 return -EINVAL;
2683
2684 *data++ = dir_entries;
2685 *data++ = entry_length;
2686 len -= 2;
2687 memset(data, 0xff, len);
2688
2689 buflen = dir_entries * entry_length;
2690 buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
2691 GFP_KERNEL);
2692 if (!buf) {
2693 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
2694 (unsigned)buflen);
2695 return -ENOMEM;
2696 }
2697 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
2698 req.host_dest_addr = cpu_to_le64(dma_handle);
2699 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2700 if (rc == 0)
2701 memcpy(data, buf, len > buflen ? buflen : len);
2702 dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
2703 return rc;
2704}
2705
2706static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
2707 u32 length, u8 *data)
2708{
2709 struct bnxt *bp = netdev_priv(dev);
2710 int rc;
2711 u8 *buf;
2712 dma_addr_t dma_handle;
2713 struct hwrm_nvm_read_input req = {0};
2714
2715 if (!length)
2716 return -EINVAL;
2717
2718 buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
2719 GFP_KERNEL);
2720 if (!buf) {
2721 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
2722 (unsigned)length);
2723 return -ENOMEM;
2724 }
2725 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
2726 req.host_dest_addr = cpu_to_le64(dma_handle);
2727 req.dir_idx = cpu_to_le16(index);
2728 req.offset = cpu_to_le32(offset);
2729 req.len = cpu_to_le32(length);
2730
2731 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2732 if (rc == 0)
2733 memcpy(data, buf, length);
2734 dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
2735 return rc;
2736}
2737
2738static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
2739 u16 ext, u16 *index, u32 *item_length,
2740 u32 *data_length)
2741{
2742 struct bnxt *bp = netdev_priv(dev);
2743 int rc;
2744 struct hwrm_nvm_find_dir_entry_input req = {0};
2745 struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;
2746
2747 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
2748 req.enables = 0;
2749 req.dir_idx = 0;
2750 req.dir_type = cpu_to_le16(type);
2751 req.dir_ordinal = cpu_to_le16(ordinal);
2752 req.dir_ext = cpu_to_le16(ext);
2753 req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
2754 mutex_lock(&bp->hwrm_cmd_lock);
2755 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2756 if (rc == 0) {
2757 if (index)
2758 *index = le16_to_cpu(output->dir_idx);
2759 if (item_length)
2760 *item_length = le32_to_cpu(output->dir_item_length);
2761 if (data_length)
2762 *data_length = le32_to_cpu(output->dir_data_length);
2763 }
2764 mutex_unlock(&bp->hwrm_cmd_lock);
2765 return rc;
2766}
2767
2768static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
2769{
2770 char *retval = NULL;
2771 char *p;
2772 char *value;
2773 int field = 0;
2774
2775 if (datalen < 1)
2776 return NULL;
	/* null-terminate the log data (removing last '\n'): */
2778 data[datalen - 1] = 0;
2779 for (p = data; *p != 0; p++) {
2780 field = 0;
2781 retval = NULL;
2782 while (*p != 0 && *p != '\n') {
2783 value = p;
2784 while (*p != 0 && *p != '\t' && *p != '\n')
2785 p++;
2786 if (field == desired_field)
2787 retval = value;
2788 if (*p != '\t')
2789 break;
2790 *p = 0;
2791 field++;
2792 p++;
2793 }
2794 if (*p == 0)
2795 break;
2796 *p = 0;
2797 }
2798 return retval;
2799}
2800
2801static void bnxt_get_pkgver(struct net_device *dev)
2802{
2803 struct bnxt *bp = netdev_priv(dev);
2804 u16 index = 0;
2805 char *pkgver;
2806 u32 pkglen;
2807 u8 *pkgbuf;
2808 int len;
2809
2810 if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
2811 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
2812 &index, NULL, &pkglen) != 0)
2813 return;
2814
2815 pkgbuf = kzalloc(pkglen, GFP_KERNEL);
2816 if (!pkgbuf) {
2817 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
2818 pkglen);
2819 return;
2820 }
2821
2822 if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
2823 goto err;
2824
2825 pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
2826 pkglen);
2827 if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
2828 len = strlen(bp->fw_ver_str);
2829 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
2830 "/pkg %s", pkgver);
2831 }
2832err:
2833 kfree(pkgbuf);
2834}
2835
2836static int bnxt_get_eeprom(struct net_device *dev,
2837 struct ethtool_eeprom *eeprom,
2838 u8 *data)
2839{
2840 u32 index;
2841 u32 offset;
2842
	if (eeprom->offset == 0) /* special offset value to get directory */
2844 return bnxt_get_nvram_directory(dev, eeprom->len, data);
2845
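	/* The upper byte of the offset encodes the directory index (plus 1);
	 * the lower 24 bits are the byte offset within that item.
	 */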
2846 index = eeprom->offset >> 24;
2847 offset = eeprom->offset & 0xffffff;
2848
2849 if (index == 0) {
2850 netdev_err(dev, "unsupported index value: %d\n", index);
2851 return -EINVAL;
2852 }
2853
2854 return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
2855}
2856
2857static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
2858{
2859 struct bnxt *bp = netdev_priv(dev);
2860 struct hwrm_nvm_erase_dir_entry_input req = {0};
2861
2862 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
2863 req.dir_idx = cpu_to_le16(index);
2864 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2865}
2866
2867static int bnxt_set_eeprom(struct net_device *dev,
2868 struct ethtool_eeprom *eeprom,
2869 u8 *data)
2870{
2871 struct bnxt *bp = netdev_priv(dev);
2872 u8 index, dir_op;
2873 u16 type, ext, ordinal, attr;
2874
2875 if (!BNXT_PF(bp)) {
2876 netdev_err(dev, "NVM write not supported from a virtual function\n");
2877 return -EINVAL;
2878 }
2879
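	/* The NVM item type is encoded in the upper 16 bits of the magic */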
2880 type = eeprom->magic >> 16;
2881
	if (type == 0xffff) { /* special value for directory operations */
2883 index = eeprom->magic & 0xff;
2884 dir_op = eeprom->magic >> 8;
2885 if (index == 0)
2886 return -EINVAL;
2887 switch (dir_op) {
		case 0x0e: /* erase */
2889 if (eeprom->offset != ~eeprom->magic)
2890 return -EINVAL;
2891 return bnxt_erase_nvram_directory(dev, index - 1);
2892 default:
2893 return -EINVAL;
2894 }
2895 }
2896
	/* Create or re-write an NVM item: */
2898 if (bnxt_dir_type_is_executable(type))
2899 return -EOPNOTSUPP;
2900 ext = eeprom->magic & 0xffff;
2901 ordinal = eeprom->offset >> 16;
2902 attr = eeprom->offset & 0xffff;
2903
2904 return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
2905 eeprom->len);
2906}
2907
2908static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
2909{
2910 struct bnxt *bp = netdev_priv(dev);
2911 struct ethtool_eee *eee = &bp->eee;
2912 struct bnxt_link_info *link_info = &bp->link_info;
2913 u32 advertising;
2914 int rc = 0;
2915
2916 if (!BNXT_PHY_CFG_ABLE(bp))
2917 return -EOPNOTSUPP;
2918
2919 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
2920 return -EOPNOTSUPP;
2921
2922 mutex_lock(&bp->link_lock);
2923 advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
2924 if (!edata->eee_enabled)
2925 goto eee_ok;
2926
2927 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
2928 netdev_warn(dev, "EEE requires autoneg\n");
2929 rc = -EINVAL;
2930 goto eee_exit;
2931 }
2932 if (edata->tx_lpi_enabled) {
2933 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
2934 edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
2935 netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
2936 bp->lpi_tmr_lo, bp->lpi_tmr_hi);
2937 rc = -EINVAL;
2938 goto eee_exit;
2939 } else if (!bp->lpi_tmr_hi) {
2940 edata->tx_lpi_timer = eee->tx_lpi_timer;
2941 }
2942 }
2943 if (!edata->advertised) {
2944 edata->advertised = advertising & eee->supported;
2945 } else if (edata->advertised & ~advertising) {
2946 netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
2947 edata->advertised, advertising);
2948 rc = -EINVAL;
2949 goto eee_exit;
2950 }
2951
2952 eee->advertised = edata->advertised;
2953 eee->tx_lpi_enabled = edata->tx_lpi_enabled;
2954 eee->tx_lpi_timer = edata->tx_lpi_timer;
2955eee_ok:
2956 eee->eee_enabled = edata->eee_enabled;
2957
2958 if (netif_running(dev))
2959 rc = bnxt_hwrm_set_link_setting(bp, false, true);
2960
2961eee_exit:
2962 mutex_unlock(&bp->link_lock);
2963 return rc;
2964}
2965
2966static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
2967{
2968 struct bnxt *bp = netdev_priv(dev);
2969
2970 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
2971 return -EOPNOTSUPP;
2972
2973 *edata = bp->eee;
2974 if (!bp->eee.eee_enabled) {
		/* Preserve tx_lpi_timer so that the last value will be used
		 * by default when it is re-enabled.
		 */
2978 edata->advertised = 0;
2979 edata->tx_lpi_enabled = 0;
2980 }
2981
2982 if (!bp->eee.eee_active)
2983 edata->lp_advertised = 0;
2984
2985 return 0;
2986}
2987
2988static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
2989 u16 page_number, u16 start_addr,
2990 u16 data_length, u8 *buf)
2991{
2992 struct hwrm_port_phy_i2c_read_input req = {0};
2993 struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
2994 int rc, byte_offset = 0;
2995
2996 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
2997 req.i2c_slave_addr = i2c_addr;
2998 req.page_number = cpu_to_le16(page_number);
2999 req.port_id = cpu_to_le16(bp->pf.port_id);
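	/* Read the module EEPROM in chunks of at most
	 * BNXT_MAX_PHY_I2C_RESP_SIZE bytes per HWRM request.
	 */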
3000 do {
3001 u16 xfer_size;
3002
3003 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
3004 data_length -= xfer_size;
3005 req.page_offset = cpu_to_le16(start_addr + byte_offset);
3006 req.data_length = xfer_size;
3007 req.enables = cpu_to_le32(start_addr + byte_offset ?
3008 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
3009 mutex_lock(&bp->hwrm_cmd_lock);
3010 rc = _hwrm_send_message(bp, &req, sizeof(req),
3011 HWRM_CMD_TIMEOUT);
3012 if (!rc)
3013 memcpy(buf + byte_offset, output->data, xfer_size);
3014 mutex_unlock(&bp->hwrm_cmd_lock);
3015 byte_offset += xfer_size;
3016 } while (!rc && data_length > 0);
3017
3018 return rc;
3019}
3020
3021static int bnxt_get_module_info(struct net_device *dev,
3022 struct ethtool_modinfo *modinfo)
3023{
3024 u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
3025 struct bnxt *bp = netdev_priv(dev);
3026 int rc;
3027
	/* No point in going further if phy status indicates
	 * module is not inserted or if it is powered down or
	 * if it is of type 10GBase-T
	 */
3032 if (bp->link_info.module_status >
3033 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
3034 return -EOPNOTSUPP;
3035
	/* This feature is not supported in older firmware versions */
3037 if (bp->hwrm_spec_code < 0x10202)
3038 return -EOPNOTSUPP;
3039
3040 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
3041 SFF_DIAG_SUPPORT_OFFSET + 1,
3042 data);
3043 if (!rc) {
3044 u8 module_id = data[0];
3045 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];
3046
3047 switch (module_id) {
3048 case SFF_MODULE_ID_SFP:
3049 modinfo->type = ETH_MODULE_SFF_8472;
3050 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
3051 if (!diag_supported)
3052 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
3053 break;
3054 case SFF_MODULE_ID_QSFP:
3055 case SFF_MODULE_ID_QSFP_PLUS:
3056 modinfo->type = ETH_MODULE_SFF_8436;
3057 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
3058 break;
3059 case SFF_MODULE_ID_QSFP28:
3060 modinfo->type = ETH_MODULE_SFF_8636;
3061 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
3062 break;
3063 default:
3064 rc = -EOPNOTSUPP;
3065 break;
3066 }
3067 }
3068 return rc;
3069}
3070
3071static int bnxt_get_module_eeprom(struct net_device *dev,
3072 struct ethtool_eeprom *eeprom,
3073 u8 *data)
3074{
3075 struct bnxt *bp = netdev_priv(dev);
3076 u16 start = eeprom->offset, length = eeprom->len;
3077 int rc = 0;
3078
3079 memset(data, 0, eeprom->len);
3080
	/* Read A0 portion of the EEPROM */
3082 if (start < ETH_MODULE_SFF_8436_LEN) {
3083 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
3084 length = ETH_MODULE_SFF_8436_LEN - start;
3085 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
3086 start, length, data);
3087 if (rc)
3088 return rc;
3089 start += length;
3090 data += length;
3091 length = eeprom->len - length;
3092 }
3093
	/* Read A2 portion of the EEPROM */
3095 if (length) {
3096 start -= ETH_MODULE_SFF_8436_LEN;
3097 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0,
3098 start, length, data);
3099 }
3100 return rc;
3101}
3102
3103static int bnxt_nway_reset(struct net_device *dev)
3104{
3105 int rc = 0;
3106
3107 struct bnxt *bp = netdev_priv(dev);
3108 struct bnxt_link_info *link_info = &bp->link_info;
3109
3110 if (!BNXT_PHY_CFG_ABLE(bp))
3111 return -EOPNOTSUPP;
3112
3113 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
3114 return -EINVAL;
3115
3116 if (netif_running(dev))
3117 rc = bnxt_hwrm_set_link_setting(bp, true, false);
3118
3119 return rc;
3120}
3121
3122static int bnxt_set_phys_id(struct net_device *dev,
3123 enum ethtool_phys_id_state state)
3124{
3125 struct hwrm_port_led_cfg_input req = {0};
3126 struct bnxt *bp = netdev_priv(dev);
3127 struct bnxt_pf_info *pf = &bp->pf;
3128 struct bnxt_led_cfg *led_cfg;
3129 u8 led_state;
3130 __le16 duration;
3131 int i;
3132
3133 if (!bp->num_leds || BNXT_VF(bp))
3134 return -EOPNOTSUPP;
3135
3136 if (state == ETHTOOL_ID_ACTIVE) {
3137 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
3138 duration = cpu_to_le16(500);
3139 } else if (state == ETHTOOL_ID_INACTIVE) {
3140 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
3141 duration = cpu_to_le16(0);
3142 } else {
3143 return -EINVAL;
3144 }
3145 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
3146 req.port_id = cpu_to_le16(pf->port_id);
3147 req.num_leds = bp->num_leds;
3148 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3149 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3150 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3151 led_cfg->led_id = bp->leds[i].led_id;
3152 led_cfg->led_state = led_state;
3153 led_cfg->led_blink_on = duration;
3154 led_cfg->led_blink_off = duration;
3155 led_cfg->led_group_id = bp->leds[i].led_group_id;
3156 }
3157 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3158}
3159
3160static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
3161{
3162 struct hwrm_selftest_irq_input req = {0};
3163
3164 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
3165 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3166}
3167
3168static int bnxt_test_irq(struct bnxt *bp)
3169{
3170 int i;
3171
3172 for (i = 0; i < bp->cp_nr_rings; i++) {
3173 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
3174 int rc;
3175
3176 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
3177 if (rc)
3178 return rc;
3179 }
3180 return 0;
3181}
3182
3183static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
3184{
3185 struct hwrm_port_mac_cfg_input req = {0};
3186
3187 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
3188
3189 req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
3190 if (enable)
3191 req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
3192 else
3193 req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
3194 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3195}
3196
3197static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
3198{
3199 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3200 struct hwrm_port_phy_qcaps_input req = {0};
3201 int rc;
3202
3203 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
3204 mutex_lock(&bp->hwrm_cmd_lock);
3205 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3206 if (!rc)
3207 *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
3208
3209 mutex_unlock(&bp->hwrm_cmd_lock);
3210 return rc;
3211}
3212
3213static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
3214 struct hwrm_port_phy_cfg_input *req)
3215{
3216 struct bnxt_link_info *link_info = &bp->link_info;
3217 u16 fw_advertising;
3218 u16 fw_speed;
3219 int rc;
3220
3221 if (!link_info->autoneg ||
3222 (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
3223 return 0;
3224
3225 rc = bnxt_query_force_speeds(bp, &fw_advertising);
3226 if (rc)
3227 return rc;
3228
3229 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
3230 if (bp->link_info.link_up)
3231 fw_speed = bp->link_info.link_speed;
3232 else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
3233 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
3234 else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
3235 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
3236 else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
3237 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
3238 else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
3239 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
3240
3241 req->force_link_speed = cpu_to_le16(fw_speed);
3242 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
3243 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
3244 rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
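	/* Undo the temporary forced-speed settings in the request so the
	 * caller can reuse it for the loopback configuration.
	 */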
3245 req->flags = 0;
3246 req->force_link_speed = cpu_to_le16(0);
3247 return rc;
3248}
3249
3250static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
3251{
3252 struct hwrm_port_phy_cfg_input req = {0};
3253
3254 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
3255
3256 if (enable) {
3257 bnxt_disable_an_for_lpbk(bp, &req);
3258 if (ext)
3259 req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
3260 else
3261 req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
3262 } else {
3263 req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
3264 }
3265 req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
3266 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3267}
3268
3269static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3270 u32 raw_cons, int pkt_size)
3271{
3272 struct bnxt_napi *bnapi = cpr->bnapi;
3273 struct bnxt_rx_ring_info *rxr;
3274 struct bnxt_sw_rx_bd *rx_buf;
3275 struct rx_cmp *rxcmp;
3276 u16 cp_cons, cons;
3277 u8 *data;
3278 u32 len;
3279 int i;
3280
3281 rxr = bnapi->rx_ring;
3282 cp_cons = RING_CMP(raw_cons);
3283 rxcmp = (struct rx_cmp *)
3284 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3285 cons = rxcmp->rx_cmp_opaque;
3286 rx_buf = &rxr->rx_buf_ring[cons];
3287 data = rx_buf->data_ptr;
3288 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
3289 if (len != pkt_size)
3290 return -EIO;
3291 i = ETH_ALEN;
3292 if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
3293 return -EIO;
3294 i += ETH_ALEN;
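	/* verify the incrementing byte pattern in the payload */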
3295 for ( ; i < pkt_size; i++) {
3296 if (data[i] != (u8)(i & 0xff))
3297 return -EIO;
3298 }
3299 return 0;
3300}
3301
3302static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3303 int pkt_size)
3304{
3305 struct tx_cmp *txcmp;
3306 int rc = -EIO;
3307 u32 raw_cons;
3308 u32 cons;
3309 int i;
3310
3311 raw_cons = cpr->cp_raw_cons;
3312 for (i = 0; i < 200; i++) {
3313 cons = RING_CMP(raw_cons);
3314 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3315
3316 if (!TX_CMP_VALID(txcmp, raw_cons)) {
3317 udelay(5);
3318 continue;
3319 }
3320
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
3324 dma_rmb();
3325 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
3326 rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
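			/* an RX L2 completion consumes two ring entries */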
3327 raw_cons = NEXT_RAW_CMP(raw_cons);
3328 raw_cons = NEXT_RAW_CMP(raw_cons);
3329 break;
3330 }
3331 raw_cons = NEXT_RAW_CMP(raw_cons);
3332 }
3333 cpr->cp_raw_cons = raw_cons;
3334 return rc;
3335}
3336
3337static int bnxt_run_loopback(struct bnxt *bp)
3338{
3339 struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
3340 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
3341 struct bnxt_cp_ring_info *cpr;
3342 int pkt_size, i = 0;
3343 struct sk_buff *skb;
3344 dma_addr_t map;
3345 u8 *data;
3346 int rc;
3347
3348 cpr = &rxr->bnapi->cp_ring;
3349 if (bp->flags & BNXT_FLAG_CHIP_P5)
3350 cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
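	/* Build the loopback frame: broadcast destination, our own MAC as
	 * source, followed by an incrementing byte pattern.
	 */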
3351 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
3352 skb = netdev_alloc_skb(bp->dev, pkt_size);
3353 if (!skb)
3354 return -ENOMEM;
3355 data = skb_put(skb, pkt_size);
3356 eth_broadcast_addr(data);
3357 i += ETH_ALEN;
3358 ether_addr_copy(&data[i], bp->dev->dev_addr);
3359 i += ETH_ALEN;
3360 for ( ; i < pkt_size; i++)
3361 data[i] = (u8)(i & 0xff);
3362
3363 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
3364 PCI_DMA_TODEVICE);
3365 if (dma_mapping_error(&bp->pdev->dev, map)) {
3366 dev_kfree_skb(skb);
3367 return -EIO;
3368 }
3369 bnxt_xmit_bd(bp, txr, map, pkt_size);
3370
	/* Sync BD data before updating doorbell */
3372 wmb();
3373
3374 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
3375 rc = bnxt_poll_loopback(bp, cpr, pkt_size);
3376
3377 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
3378 dev_kfree_skb(skb);
3379 return rc;
3380}
3381
3382static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
3383{
3384 struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
3385 struct hwrm_selftest_exec_input req = {0};
3386 int rc;
3387
3388 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
3389 mutex_lock(&bp->hwrm_cmd_lock);
3390 resp->test_success = 0;
3391 req.flags = test_mask;
3392 rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
3393 *test_results = resp->test_success;
3394 mutex_unlock(&bp->hwrm_cmd_lock);
3395 return rc;
3396}
3397
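/* The last BNXT_DRV_TESTS entries in the test list are driver-run tests
 * (MAC/PHY/external loopback and IRQ), appended after the firmware tests.
 */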
3398#define BNXT_DRV_TESTS 4
3399#define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS)
3400#define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1)
3401#define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
3402#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3)
3403
3404static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
3405 u64 *buf)
3406{
3407 struct bnxt *bp = netdev_priv(dev);
3408 bool do_ext_lpbk = false;
3409 bool offline = false;
3410 u8 test_results = 0;
3411 u8 test_mask = 0;
3412 int rc = 0, i;
3413
3414 if (!bp->num_tests || !BNXT_PF(bp))
3415 return;
3416 memset(buf, 0, sizeof(u64) * bp->num_tests);
3417 if (!netif_running(dev)) {
3418 etest->flags |= ETH_TEST_FL_FAILED;
3419 return;
3420 }
3421
3422 if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
3423 (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
3424 do_ext_lpbk = true;
3425
3426 if (etest->flags & ETH_TEST_FL_OFFLINE) {
3427 if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
3428 etest->flags |= ETH_TEST_FL_FAILED;
3429 netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
3430 return;
3431 }
3432 offline = true;
3433 }
3434
3435 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
3436 u8 bit_val = 1 << i;
3437
3438 if (!(bp->test_info->offline_mask & bit_val))
3439 test_mask |= bit_val;
3440 else if (offline)
3441 test_mask |= bit_val;
3442 }
3443 if (!offline) {
3444 bnxt_run_fw_tests(bp, test_mask, &test_results);
3445 } else {
3446 rc = bnxt_close_nic(bp, false, false);
3447 if (rc)
3448 return;
3449 bnxt_run_fw_tests(bp, test_mask, &test_results);
3450
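		/* Mark the MAC loopback test as failed until it passes below */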
3451 buf[BNXT_MACLPBK_TEST_IDX] = 1;
3452 bnxt_hwrm_mac_loopback(bp, true);
3453 msleep(250);
3454 rc = bnxt_half_open_nic(bp);
3455 if (rc) {
3456 bnxt_hwrm_mac_loopback(bp, false);
3457 etest->flags |= ETH_TEST_FL_FAILED;
3458 return;
3459 }
3460 if (bnxt_run_loopback(bp))
3461 etest->flags |= ETH_TEST_FL_FAILED;
3462 else
3463 buf[BNXT_MACLPBK_TEST_IDX] = 0;
3464
3465 bnxt_hwrm_mac_loopback(bp, false);
3466 bnxt_hwrm_phy_loopback(bp, true, false);
3467 msleep(1000);
3468 if (bnxt_run_loopback(bp)) {
3469 buf[BNXT_PHYLPBK_TEST_IDX] = 1;
3470 etest->flags |= ETH_TEST_FL_FAILED;
3471 }
3472 if (do_ext_lpbk) {
3473 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
3474 bnxt_hwrm_phy_loopback(bp, true, true);
3475 msleep(1000);
3476 if (bnxt_run_loopback(bp)) {
3477 buf[BNXT_EXTLPBK_TEST_IDX] = 1;
3478 etest->flags |= ETH_TEST_FL_FAILED;
3479 }
3480 }
3481 bnxt_hwrm_phy_loopback(bp, false, false);
3482 bnxt_half_close_nic(bp);
3483 rc = bnxt_open_nic(bp, false, true);
3484 }
3485 if (rc || bnxt_test_irq(bp)) {
3486 buf[BNXT_IRQ_TEST_IDX] = 1;
3487 etest->flags |= ETH_TEST_FL_FAILED;
3488 }
3489 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
3490 u8 bit_val = 1 << i;
3491
3492 if ((test_mask & bit_val) && !(test_results & bit_val)) {
3493 buf[i] = 1;
3494 etest->flags |= ETH_TEST_FL_FAILED;
3495 }
3496 }
3497}
3498
3499static int bnxt_reset(struct net_device *dev, u32 *flags)
3500{
3501 struct bnxt *bp = netdev_priv(dev);
3502 bool reload = false;
3503 u32 req = *flags;
3504
3505 if (!req)
3506 return -EINVAL;
3507
3508 if (!BNXT_PF(bp)) {
3509 netdev_err(dev, "Reset is not supported from a VF\n");
3510 return -EOPNOTSUPP;
3511 }
3512
3513 if (pci_vfs_assigned(bp->pdev) &&
3514 !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
3515 netdev_err(dev,
3516 "Reset not allowed when VFs are assigned to VMs\n");
3517 return -EBUSY;
3518 }
3519
3520 if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
		/* This feature is not supported in older firmware versions */
3522 if (bp->hwrm_spec_code >= 0x10803) {
3523 if (!bnxt_firmware_reset_chip(dev)) {
3524 netdev_info(dev, "Firmware reset request successful.\n");
3525 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
3526 reload = true;
3527 *flags &= ~BNXT_FW_RESET_CHIP;
3528 }
3529 } else if (req == BNXT_FW_RESET_CHIP) {
3530 return -EOPNOTSUPP;
3531 }
3532 }
3533
3534 if (req & BNXT_FW_RESET_AP) {
		/* This feature is not supported in older firmware versions */
3536 if (bp->hwrm_spec_code >= 0x10803) {
3537 if (!bnxt_firmware_reset_ap(dev)) {
3538 netdev_info(dev, "Reset application processor successful.\n");
3539 reload = true;
3540 *flags &= ~BNXT_FW_RESET_AP;
3541 }
3542 } else if (req == BNXT_FW_RESET_AP) {
3543 return -EOPNOTSUPP;
3544 }
3545 }
3546
3547 if (reload)
3548 netdev_info(dev, "Reload driver to complete reset\n");
3549
3550 return 0;
3551}
3552
3553static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
3554 struct bnxt_hwrm_dbg_dma_info *info)
3555{
3556 struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
3557 struct hwrm_dbg_cmn_input *cmn_req = msg;
3558 __le16 *seq_ptr = msg + info->seq_off;
3559 u16 seq = 0, len, segs_off;
3560 void *resp = cmn_resp;
3561 dma_addr_t dma_handle;
3562 int rc, off = 0;
3563 void *dma_buf;
3564
3565 dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
3566 GFP_KERNEL);
3567 if (!dma_buf)
3568 return -ENOMEM;
3569
3570 segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
3571 total_segments);
3572 cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
3573 cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
3574 mutex_lock(&bp->hwrm_cmd_lock);
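	/* Retrieve the data in chunks, one HWRM message per sequence number,
	 * until the firmware stops setting the MORE flag.
	 */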
3575 while (1) {
3576 *seq_ptr = cpu_to_le16(seq);
3577 rc = _hwrm_send_message(bp, msg, msg_len,
3578 HWRM_COREDUMP_TIMEOUT);
3579 if (rc)
3580 break;
3581
3582 len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
3583 if (!seq &&
3584 cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
3585 info->segs = le16_to_cpu(*((__le16 *)(resp +
3586 segs_off)));
3587 if (!info->segs) {
3588 rc = -EIO;
3589 break;
3590 }
3591
3592 info->dest_buf_size = info->segs *
3593 sizeof(struct coredump_segment_record);
3594 info->dest_buf = kmalloc(info->dest_buf_size,
3595 GFP_KERNEL);
3596 if (!info->dest_buf) {
3597 rc = -ENOMEM;
3598 break;
3599 }
3600 }
3601
3602 if (info->dest_buf) {
3603 if ((info->seg_start + off + len) <=
3604 BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
3605 memcpy(info->dest_buf + off, dma_buf, len);
3606 } else {
3607 rc = -ENOBUFS;
3608 break;
3609 }
3610 }
3611
3612 if (cmn_req->req_type ==
3613 cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
3614 info->dest_buf_size += len;
3615
3616 if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
3617 break;
3618
3619 seq++;
3620 off += len;
3621 }
3622 mutex_unlock(&bp->hwrm_cmd_lock);
3623 dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
3624 return rc;
3625}
3626
3627static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
3628 struct bnxt_coredump *coredump)
3629{
3630 struct hwrm_dbg_coredump_list_input req = {0};
3631 struct bnxt_hwrm_dbg_dma_info info = {NULL};
3632 int rc;
3633
3634 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);
3635
3636 info.dma_len = COREDUMP_LIST_BUF_LEN;
3637 info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
3638 info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
3639 data_len);
3640
3641 rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
3642 if (!rc) {
3643 coredump->data = info.dest_buf;
3644 coredump->data_size = info.dest_buf_size;
3645 coredump->total_segs = info.segs;
3646 }
3647 return rc;
3648}
3649
3650static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
3651 u16 segment_id)
3652{
3653 struct hwrm_dbg_coredump_initiate_input req = {0};
3654
3655 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
3656 req.component_id = cpu_to_le16(component_id);
3657 req.segment_id = cpu_to_le16(segment_id);
3658
3659 return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT);
3660}
3661
3662static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
3663 u16 segment_id, u32 *seg_len,
3664 void *buf, u32 buf_len, u32 offset)
3665{
3666 struct hwrm_dbg_coredump_retrieve_input req = {0};
3667 struct bnxt_hwrm_dbg_dma_info info = {NULL};
3668 int rc;
3669
3670 bnxt_hwrm_cmd_hdr_init(bp, &req