1
2
3
4
5
6
7
8
9
10
11
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/module.h>
16#include <linux/moduleparam.h>
17
18#include <linux/stringify.h>
19#include <linux/kernel.h>
20#include <linux/timer.h>
21#include <linux/errno.h>
22#include <linux/ioport.h>
23#include <linux/slab.h>
24#include <linux/vmalloc.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/skbuff.h>
30#include <linux/dma-mapping.h>
31#include <linux/bitops.h>
32#include <asm/io.h>
33#include <asm/irq.h>
34#include <linux/delay.h>
35#include <asm/byteorder.h>
36#include <asm/page.h>
37#include <linux/time.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
40#include <linux/if.h>
41#include <linux/if_vlan.h>
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
48#include <linux/cache.h>
49#include <linux/firmware.h>
50#include <linux/log2.h>
51#include <linux/aer.h>
52#include <linux/crash_dump.h>
53
54#if IS_ENABLED(CONFIG_CNIC)
55#define BCM_CNIC 1
56#include "cnic_if.h"
57#endif
58#include "bnx2.h"
59#include "bnx2_fw.h"
60
/* Firmware image names, loaded with request_firmware() at probe time.
 * The "06" images serve the 5706/5708 family, the "09" images the
 * 5709/5716 family (the Ax stepping of the 5709 needs its own RV2P image).
 */
#define DRV_MODULE_NAME "bnx2"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"

/* Convert a relative delay into an absolute jiffies expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Delay before the netdev watchdog declares a transmit timeout. */
#define TX_TIMEOUT (5*HZ)

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

/* Module parameter (read-only after load): set disable_msi=1 to force
 * legacy INTx interrupts instead of MSI.
 */
static int disable_msi = 0;

module_param(disable_msi, int, 0444);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
86
/* Board identifiers.  Each value is stored in the driver_data field of
 * a bnx2_pci_tbl[] entry and doubles as the index into board_info[]
 * below, so all three orderings must stay in sync.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
100
101
/* Human-readable board names, indexed by board_t.  The entry order must
 * match the board_t enum exactly.
 */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
117
/* PCI match table.  The HP OEM entries (matched on subsystem vendor/ID)
 * must precede the generic PCI_ANY_ID entries for the same device ID,
 * because PCI matching takes the first entry that fits.  The last field
 * is the board_t identifier.
 */
static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 5716/5716S have no PCI_DEVICE_ID_NX2_* constants; raw IDs used. */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
143
/* NVRAM device descriptors for the pre-5709 chips.  The driver probes the
 * NVRAM strapping bits and picks the matching entry (first word of each
 * entry encodes the strapping pattern — TODO confirm exact decode against
 * the NetXtreme II datasheet).  The remaining hex words are controller
 * configuration values for that flash part, followed by page geometry,
 * addressing mask, total size, and a descriptive name.  Entry order and
 * values are hardware-defined; do not reorder or edit.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS (BNX2_NV_WREN)

	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 (unused strapping combination) */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered, 128kB) */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered, 256kB) */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 (unused strapping combination) */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* ST M45PE10 (non-buffered, 128kB) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* ST M45PE20 (non-buffered, 256kB) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered, 64kB) */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 (unused strapping combination) */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 (unused strapping combination) */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* Buffered flash, 128kB (ATMEL AT45DB011B-class part) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 (unused strapping combination) */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 (unused strapping combination) */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Buffered flash, Atmel variant (size unreported) */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* Buffered flash, 256kB */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
232
/* The 5709/5716 always use one fixed buffered-flash layout, so it gets a
 * single descriptor instead of an entry in flash_table[] above.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
241
/* Export the PCI ID table for module autoloading (modalias). */
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

/* Forward declarations; defined later in this file. */
static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);
246
/* Return the number of free TX descriptors in @txr.
 *
 * prod/cons are free-running 16-bit indices read without the TX lock,
 * hence READ_ONCE.  The hardware ring uses 256 indices per page but only
 * 255 usable entries — one index per page must be skipped, which is what
 * the BNX2_TX_DESC_CNT adjustment below accounts for.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}
262
263static u32
264bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
265{
266 unsigned long flags;
267 u32 val;
268
269 spin_lock_irqsave(&bp->indirect_lock, flags);
270 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
271 val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
272 spin_unlock_irqrestore(&bp->indirect_lock, flags);
273 return val;
274}
275
276static void
277bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
278{
279 unsigned long flags;
280
281 spin_lock_irqsave(&bp->indirect_lock, flags);
282 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
283 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
284 spin_unlock_irqrestore(&bp->indirect_lock, flags);
285}
286
287static void
288bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
289{
290 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
291}
292
293static u32
294bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
295{
296 return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
297}
298
/* Write @val into on-chip context memory at @cid_addr + @offset.
 *
 * On the 5709 the write goes through the CTX_CTX_DATA/CTX_CTX_CTRL
 * register pair and we poll (up to 5 x 5us) for the WRITE_REQ bit to
 * clear; older chips take the address and data directly.  A timeout is
 * silently ignored here.  indirect_lock serializes access to the shared
 * context-access registers.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	unsigned long flags;

	offset += cid_addr;
	spin_lock_irqsave(&bp->indirect_lock, flags);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait for the controller to consume the write request. */
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
324
325#ifdef BCM_CNIC
326static int
327bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
328{
329 struct bnx2 *bp = netdev_priv(dev);
330 struct drv_ctl_io *io = &info->data.io;
331
332 switch (info->cmd) {
333 case DRV_CTL_IO_WR_CMD:
334 bnx2_reg_wr_ind(bp, io->offset, io->data);
335 break;
336 case DRV_CTL_IO_RD_CMD:
337 io->data = bnx2_reg_rd_ind(bp, io->offset);
338 break;
339 case DRV_CTL_CTX_WR_CMD:
340 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
341 break;
342 default:
343 return -EINVAL;
344 }
345 return 0;
346}
347
/* Publish this device's IRQ and status-block layout to the CNIC driver.
 *
 * With MSI-X the CNIC is handed the vector after the last ethernet
 * vector (sb_id = bp->irq_nvecs) and its own status block within the
 * MSI-X status block array.  Without MSI-X it shares vector 0; in that
 * case cnic_present/cnic_tag let the shared ISR path poll for CNIC
 * events.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Each MSI-X status block is BNX2_SBLK_MSIX_ALIGN_SIZE apart. */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
374
/* Register the CNIC driver's ops with this device.
 *
 * Returns -EINVAL for a NULL ops pointer, -EBUSY if a CNIC is already
 * registered, and -ENODEV if the firmware reports zero available iSCSI
 * connections.  cnic_ops is published with rcu_assign_pointer so that
 * lockless readers (the interrupt path) see a fully initialized pointer.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!ops)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
400
/* Unregister the CNIC driver from this device.
 *
 * Clears the registration state under cnic_lock, then waits in
 * synchronize_rcu() so no RCU reader can still be using the old
 * cnic_ops pointer when we return.  Always returns 0.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
415
/* CNIC probe entry point: hand the CNIC driver a descriptor of this
 * device, or NULL if the firmware allows no iSCSI connections
 * (max_iscsi_conn == 0).
 */
static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
434
/* Notify a registered CNIC driver (if any) that this device is stopping.
 * cnic_lock both protects the cnic_ops dereference and keeps the CNIC
 * from unregistering while we call into it.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
450
/* Notify a registered CNIC driver (if any) that this device has started.
 * In shared-IRQ (non MSI-X) mode, resync cnic_tag with the current
 * status index before the CNIC starts consuming events.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
471
472#else
473
/* No-op stub used when CNIC support is not compiled in. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
478
/* No-op stub used when CNIC support is not compiled in. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
483
484#endif
485
/* Read PHY register @reg over MDIO into *@val.
 *
 * If the chip is auto-polling the PHY, auto-poll must be turned off
 * around the manual MDIO transaction (with a 40us settle after each
 * mode change) and restored afterwards.  The transaction is started by
 * writing EMAC_MDIO_COMM and polled up to 50 x 10us for START_BUSY to
 * clear.  Returns 0 on success, or -EBUSY on timeout with *@val set
 * to 0.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Compose and fire the MDIO read command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the returned data bits. */
			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	return ret;
}
542
/* Write @val to PHY register @reg over MDIO.
 *
 * Mirrors bnx2_read_phy(): auto-poll is suspended around the manual
 * transaction (40us settle after each mode change), the write command is
 * issued through EMAC_MDIO_COMM, and completion is polled up to
 * 50 x 10us.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Compose and fire the MDIO write command (data in the low bits). */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	return ret;
}
591
592static void
593bnx2_disable_int(struct bnx2 *bp)
594{
595 int i;
596 struct bnx2_napi *bnapi;
597
598 for (i = 0; i < bp->irq_nvecs; i++) {
599 bnapi = &bp->bnx2_napi[i];
600 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
601 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
602 }
603 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
604}
605
/* Re-enable interrupts on every vector.
 *
 * For each vector: the first INT_ACK_CMD write acknowledges up to the
 * last seen status index while still keeping the interrupt masked; the
 * second write (without MASK_INT) unmasks it.  The final COAL_NOW kick
 * asks the host coalescing block to fire immediately, so events that
 * arrived while masked are not left waiting for the next timer tick.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
626
/* Disable interrupts and wait for in-flight handlers to finish.
 *
 * intr_sem is incremented first so interrupt handlers that race with us
 * will see it non-zero and back off; it is re-decremented by
 * bnx2_netif_start().  If the device is not running, masking at the
 * hardware is skipped.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
640
641static void
642bnx2_napi_disable(struct bnx2 *bp)
643{
644 int i;
645
646 for (i = 0; i < bp->irq_nvecs; i++)
647 napi_disable(&bp->bnx2_napi[i].napi);
648}
649
650static void
651bnx2_napi_enable(struct bnx2 *bp)
652{
653 int i;
654
655 for (i = 0; i < bp->irq_nvecs; i++)
656 napi_enable(&bp->bnx2_napi[i].napi);
657}
658
/* Quiesce the interface: optionally stop the CNIC first, then NAPI and
 * the TX queues, then interrupts (synchronously), and finally drop the
 * carrier.  Paired with bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent TX timeout while stopped */
}
671
/* Undo bnx2_netif_stop(): re-enable the TX queues, carrier, NAPI,
 * interrupts and (optionally) the CNIC.
 *
 * Each stop increments intr_sem, so the restart only proceeds when the
 * matching decrement brings it back to zero — nested stop/start pairs
 * resume only at the outermost start.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
689
690static void
691bnx2_free_tx_mem(struct bnx2 *bp)
692{
693 int i;
694
695 for (i = 0; i < bp->num_tx_rings; i++) {
696 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
697 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
698
699 if (txr->tx_desc_ring) {
700 dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
701 txr->tx_desc_ring,
702 txr->tx_desc_mapping);
703 txr->tx_desc_ring = NULL;
704 }
705 kfree(txr->tx_buf_ring);
706 txr->tx_buf_ring = NULL;
707 }
708}
709
/* Free the DMA descriptor pages and the vmalloc'ed software rings of
 * every RX ring (both the normal and the page rings).  Safe on
 * partially allocated rings — NULL entries are skipped and vfree(NULL)
 * is a no-op.
 */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
741
/* Allocate the software buffer ring and DMA descriptor ring for every
 * TX ring.  Returns 0 on success or -ENOMEM; on failure the caller
 * (bnx2_alloc_mem) releases the partial allocations via bnx2_free_mem.
 */
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (!txr->tx_buf_ring)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (!txr->tx_desc_ring)
			return -ENOMEM;
	}
	return 0;
}
763
/* Allocate the software rings (vmalloc) and per-page DMA descriptor
 * rings for every RX ring.  The page ring is only allocated when jumbo
 * support needs it (bp->rx_pg_ring_size != 0).  Returns 0 on success or
 * -ENOMEM; partial allocations are released by the caller via
 * bnx2_free_mem.
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
		if (!rxr->rx_buf_ring)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (!rxr->rx_desc_ring[j])
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring =
				vzalloc(array_size(SW_RXPG_RING_SIZE,
						   bp->rx_max_pg_ring));
			if (!rxr->rx_pg_ring)
				return -ENOMEM;

		}

		/* rx_max_pg_ring is 0 when the page ring is unused, so this
		 * loop is a no-op in that case.
		 */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (!rxr->rx_pg_desc_ring[j])
				return -ENOMEM;

		}
	}
	return 0;
}
812
813static void
814bnx2_free_stats_blk(struct net_device *dev)
815{
816 struct bnx2 *bp = netdev_priv(dev);
817
818 if (bp->status_blk) {
819 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
820 bp->status_blk,
821 bp->status_blk_mapping);
822 bp->status_blk = NULL;
823 bp->stats_blk = NULL;
824 }
825}
826
/* Allocate one DMA-coherent region holding the status block(s) followed
 * by the statistics block.
 *
 * MSI-X capable chips get room for BNX2_MAX_MSIX_HW_VEC per-vector
 * status blocks, each BNX2_SBLK_MSIX_ALIGN_SIZE apart.  The statistics
 * block starts cache-line aligned right after the status area, and its
 * DMA address is derived from the same mapping.  Returns 0 or -ENOMEM.
 */
static int
bnx2_alloc_stats_blk(struct net_device *dev)
{
	int status_blk_size;
	void *status_blk;
	struct bnx2 *bp = netdev_priv(dev);

	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);
	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (!status_blk)
		return -ENOMEM;

	bp->status_blk = status_blk;
	bp->stats_blk = status_blk + status_blk_size;
	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;
}
852
853static void
854bnx2_free_mem(struct bnx2 *bp)
855{
856 int i;
857 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
858
859 bnx2_free_tx_mem(bp);
860 bnx2_free_rx_mem(bp);
861
862 for (i = 0; i < bp->ctx_pages; i++) {
863 if (bp->ctx_blk[i]) {
864 dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
865 bp->ctx_blk[i],
866 bp->ctx_blk_mapping[i]);
867 bp->ctx_blk[i] = NULL;
868 }
869 }
870
871 if (bnapi->status_blk.msi)
872 bnapi->status_blk.msi = NULL;
873}
874
/* Allocate all per-device memory: wire up the per-vector status block
 * pointers (allocated earlier by bnx2_alloc_stats_blk), allocate the
 * 5709 on-chip context backing pages, and the RX/TX rings.
 * Returns 0 on success or -ENOMEM after freeing any partial allocation.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, err;
	struct bnx2_napi *bnapi;

	/* Vector 0 uses the base status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = bp->status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining vectors get their own MSI-X status blocks,
		 * spaced BNX2_SBLK_MSIX_ALIGN_SIZE apart.
		 */
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* Vector number lives in bits 31:24 of INT_ACK_CMD. */
			bnapi->int_num = i << 24;
		}
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 0x2000 bytes of host-backed context, split into pages. */
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (!bp->ctx_blk[i])
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
931
/* Report the current link state (speed/duplex/autoneg status) to the
 * bootcode through shared memory, unless a remote PHY owns the link.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is latched; read twice to get the current
			 * state rather than a stale latched value.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
990
991static char *
992bnx2_xceiver_str(struct bnx2 *bp)
993{
994 return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
995 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
996 "Copper");
997}
998
/* Log the link state and sync the carrier, then report it to firmware.
 *
 * The netdev_info format deliberately omits the trailing newline: the
 * flow-control details are appended with pr_cont() and the line is
 * terminated afterwards.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
1029
/* Resolve the pause (flow control) configuration into bp->flow_ctrl.
 *
 * If pause autonegotiation is not fully enabled, the requested setting
 * is applied directly (full duplex only).  Otherwise the local and
 * link-partner pause advertisements are combined according to the
 * IEEE 802.3 Annex 28B pause resolution rules.  The 5708 SerDes exposes
 * the already-resolved result in a status register, so it is read
 * directly there.  Flow control is only ever enabled at full duplex.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes: the chip reports the resolved pause state itself. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* SerDes uses 1000Base-X pause bits; translate them to the
	 * copper ADVERTISE_PAUSE_* encoding so one resolution path
	 * handles both.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* 802.3 Annex 28B pause resolution from the two advertisements. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1105
/* Record link-up speed/duplex for the 5709 SerDes PHY.
 *
 * The negotiated result lives in the GP_STATUS block, so the PHY block
 * address is switched there for the read and restored afterwards.  With
 * autoneg disabled the requested settings are taken as-is.
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1144
1145static int
1146bnx2_5708s_linkup(struct bnx2 *bp)
1147{
1148 u32 val;
1149
1150 bp->link_up = 1;
1151 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1152 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1153 case BCM5708S_1000X_STAT1_SPEED_10:
1154 bp->line_speed = SPEED_10;
1155 break;
1156 case BCM5708S_1000X_STAT1_SPEED_100:
1157 bp->line_speed = SPEED_100;
1158 break;
1159 case BCM5708S_1000X_STAT1_SPEED_1G:
1160 bp->line_speed = SPEED_1000;
1161 break;
1162 case BCM5708S_1000X_STAT1_SPEED_2G5:
1163 bp->line_speed = SPEED_2500;
1164 break;
1165 }
1166 if (val & BCM5708S_1000X_STAT1_FD)
1167 bp->duplex = DUPLEX_FULL;
1168 else
1169 bp->duplex = DUPLEX_HALF;
1170
1171 return 0;
1172}
1173
/* Record link-up speed/duplex for the 5706 SerDes PHY.
 *
 * The 5706 SerDes only links at 1000 Mbps.  Duplex comes from BMCR when
 * autoneg is off; with autoneg on, it is refined from the highest
 * 1000Base-X ability common to both ends.  Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1210
/* Record link speed/duplex for a copper PHY.
 *
 * With autoneg enabled, the highest common ability is selected by
 * checking 1000 Mbps first (MII_CTRL1000 vs MII_STAT1000) and then
 * falling back to the 10/100 advertisement registers; no common ability
 * marks the link down.  With autoneg off, speed and duplex are decoded
 * straight from BMCR.  The MDI-X state is also sampled into phy_flags
 * while the link is up.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* 1000 Mbps abilities: partner status bits are shifted
		 * two positions relative to our control bits.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match; fall back to 10/100. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode speed and duplex from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}
1286
/* Program the L2 context-type word for one RX context, enabling
 * hardware flow control on it when TX pause is active.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): magic field at bits 8..15 set to 0x02 — presumably
	 * a context-size/encoding selector; confirm against chip docs.
	 */
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1301
1302static void
1303bnx2_init_all_rx_contexts(struct bnx2 *bp)
1304{
1305 int i;
1306 u32 cid;
1307
1308 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1309 if (i == 1)
1310 cid = RX_RSS_CID;
1311 bnx2_init_rx_context(bp, cid);
1312 }
1313}
1314
/* Program the EMAC to match the PHY's resolved link state: port mode
 * (MII/GMII/2.5G), duplex, RX/TX pause enables, and the TX slot-time /
 * IPG lengths.  Finishes by acking the link-change attention and
 * re-initializing the RX contexts so their flow-control bit tracks
 * bp->flow_ctrl.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths; half-duplex gigabit needs a larger value
	 * (0x26ff) — presumably for carrier extension; confirm against
	 * chip docs.
	 */
	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}


	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	/* Clear all mode bits we are about to re-derive. */
	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M port mode;
				 * it falls through to plain MII.
				 */
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				fallthrough;
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII plus the 25G mode bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				fallthrough;
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable RX PAUSE frame handling. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable TX PAUSE frame generation. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the link-change interrupt source. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* Propagate the (possibly changed) flow-control state to the
	 * RX contexts.
	 */
	bnx2_init_all_rx_contexts(bp);
}
1381
1382static void
1383bnx2_enable_bmsr1(struct bnx2 *bp)
1384{
1385 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1386 (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1387 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1388 MII_BNX2_BLK_ADDR_GP_STATUS);
1389}
1390
1391static void
1392bnx2_disable_bmsr1(struct bnx2 *bp)
1393{
1394 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1395 (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1396 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1397 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1398}
1399
/* Make sure 2.5G advertisement is enabled in the PHY's UP1 register.
 * Returns 1 if it was already enabled (or the PHY is not 2.5G capable
 * ... actually returns 0 in that case), 0 if this call had to turn it
 * on.  NOTE(review): callers treat 0 as "a change was made / link must
 * be re-forced" — confirm against bnx2_setup_serdes_phy usage.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* UP1 lives in the OVER1G block on the 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default register block on the 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1428
/* Clear the 2.5G advertisement bit in the PHY's UP1 register if set.
 * Returns 1 if the bit was set (i.e. this call changed it), 0 if it
 * was already clear or the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* UP1 lives in the OVER1G block on the 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default register block on the 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1454
/* Force the SerDes PHY to 2.5 Gb/s.  Chip-specific: the 5709 uses the
 * SERDES_DIG MISC1 force bits, the 5708 a BMCR bit; other chips are
 * not 2.5G capable and return immediately.  When autoneg speed was
 * requested, autoneg is disabled in BMCR so the forced speed sticks.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Select SERDES_DIG block and set FORCE + FORCE_2_5G
		 * in MISC1, then restore the default block.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Forced speed: autoneg off, honor requested duplex. */
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1498
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific 2.5G force
 * bits, and if autoneg speed is configured, re-enable and restart
 * autoneg at gigabit.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Clear the MISC1 force bit in the SERDES_DIG block,
		 * then restore the default block.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1537
/* Force the 5706 SerDes link down (start != 0) or release the force
 * (start == 0) via the expansion SERDES control register.
 * NOTE(review): the masks 0xff0f / 0xc0 are undocumented here —
 * presumably clearing/setting transmit-disable bits; confirm against
 * the PHY databook.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	/* Select the expansion register, then read-modify-write it
	 * through the DSP read/write port.
	 */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1550
/* Re-evaluate the PHY link state and program the MAC accordingly.
 * Reads BMSR (twice — link status is latched-low per MII convention),
 * applies a 5706 SerDes workaround that cross-checks the EMAC link
 * status against the autoneg-debug shadow register, dispatches to the
 * per-PHY link-up decoder, and reports/propagates any change.
 * Called with bp->phy_lock held.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY: link state is managed via firmware events. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Double read: the first read returns the latched value. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		/* 5706 SerDes workaround: trust EMAC link + AN sync
		 * state over BMSR, which can be unreliable here.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		/* Shadow register read also needs the double-read. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Decode speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G and re-arm autoneg
		 * if we were in parallel-detect mode.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	/* Always resync the MAC with the (possibly unchanged) state. */
	bnx2_set_mac_link(bp);

	return 0;
}
1634
1635static int
1636bnx2_reset_phy(struct bnx2 *bp)
1637{
1638 int i;
1639 u32 reg;
1640
1641 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1642
1643#define PHY_RESET_MAX_WAIT 100
1644 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1645 udelay(10);
1646
1647 bnx2_read_phy(bp, bp->mii_bmcr, ®);
1648 if (!(reg & BMCR_RESET)) {
1649 udelay(20);
1650 break;
1651 }
1652 }
1653 if (i == PHY_RESET_MAX_WAIT) {
1654 return -EBUSY;
1655 }
1656 return 0;
1657}
1658
1659static u32
1660bnx2_phy_get_pause_adv(struct bnx2 *bp)
1661{
1662 u32 adv = 0;
1663
1664 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1665 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1666
1667 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1668 adv = ADVERTISE_1000XPAUSE;
1669 }
1670 else {
1671 adv = ADVERTISE_PAUSE_CAP;
1672 }
1673 }
1674 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1675 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1676 adv = ADVERTISE_1000XPSE_ASYM;
1677 }
1678 else {
1679 adv = ADVERTISE_PAUSE_ASYM;
1680 }
1681 }
1682 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1683 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1684 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1685 }
1686 else {
1687 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1688 }
1689 }
1690 return adv;
1691}
1692
1693static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1694
/* Configure link settings on a firmware-managed (remote) PHY by
 * encoding the requested autoneg/speed/duplex/pause configuration
 * into a netlink-style argument word and issuing a SET_LINK firmware
 * command.  Drops and re-acquires bp->phy_lock around the firmware
 * handshake.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: OR in one flag per advertised mode. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: exactly one speed/duplex flag. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Map the MII pause advertisement to the firmware flags. */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* Firmware sync may sleep; release the spinlock around it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1753
/* Configure a SerDes PHY for the requested link settings.  Delegates
 * to bnx2_setup_remote_phy() for firmware-managed PHYs.  Otherwise
 * handles the forced-speed path (2.5G/1G with duplex) and the autoneg
 * path (advertisement update + AN restart with a SerDes AN timeout).
 * Called with bp->phy_lock held; may drop and retake it.  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling the 2.5G capability bit requires bouncing
		 * the link so the partner re-trains.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* Clear BMCR bit 13 (speed-select MSB,
				 * a.k.a. BMCR_SPEED100) so the speed
				 * bits encode 1000 Mb/s.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force link down first so the partner sees a
			 * clean transition: strip the advertisement and
			 * restart autoneg, then apply the forced mode.
			 */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Advertisement changed (or AN was off): bounce the
		 * link via loopback so the partner notices, then
		 * program the new advertisement and restart AN.
		 */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);








		/* Arm the SerDes AN timeout; the timer falls back to
		 * parallel detection if AN does not complete.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1870
1871#define ETHTOOL_ALL_FIBRE_SPEED \
1872 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1873 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1874 (ADVERTISED_1000baseT_Full)
1875
1876#define ETHTOOL_ALL_COPPER_SPEED \
1877 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1878 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1879 ADVERTISED_1000baseT_Full)
1880
1881#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1882 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1883
1884#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1885
/* Load the default link configuration for a firmware-managed PHY from
 * shared memory (per-media-type word) into bp's autoneg/advertising/
 * req_line_speed/req_duplex fields.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	/* Defaults are stored separately per media type. */
	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg: translate firmware speed flags into
		 * ethtool ADVERTISED_* bits.
		 */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode: highest flagged speed wins (checked in
		 * ascending order, later assignments override).
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1932
/* Initialize bp's link configuration to sane defaults: full autoneg
 * with all supported modes advertised, except when shared-memory HW
 * config forces a SerDes port to 1G full duplex.  Remote PHYs get
 * their defaults from firmware shared memory instead.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		/* Honor a forced-1G default from the HW port config. */
		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1958
/* Send a driver-pulse heartbeat to the bootcode by writing the next
 * pulse sequence number to the shared-memory pulse mailbox.  Uses the
 * indirect register window, so the write pair is serialized with
 * bp->indirect_lock.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1972
/* Handle a firmware link event for a remote (firmware-managed) PHY:
 * decode link state, speed, duplex, flow control and media type from
 * the shared-memory link-status word, then report any link change and
 * reprogram the MAC.  Also answers a heartbeat-expired flag.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Half-duplex cases set duplex then fall through to
		 * the matching full-duplex case for the speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: honor the firmware-resolved state only
		 * when both speed and flow-control autoneg are on;
		 * otherwise apply the locally requested setting (full
		 * duplex only).
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A media change (SerDes <-> TP) resets the defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2053
2054static int
2055bnx2_set_remote_link(struct bnx2 *bp)
2056{
2057 u32 evt_code;
2058
2059 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2060 switch (evt_code) {
2061 case BNX2_FW_EVT_CODE_LINK_EVENT:
2062 bnx2_remote_phy_event(bp);
2063 break;
2064 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2065 default:
2066 bnx2_send_heart_beat(bp);
2067 break;
2068 }
2069 return 0;
2070}
2071
/* Configure a copper PHY for the requested link settings.  Autoneg:
 * update the 10/100 and 1000BASE-T advertisements plus pause bits and
 * restart AN only if something changed.  Forced mode: program BMCR,
 * bouncing an established link through loopback first so the partner
 * re-trains.  Called with bp->phy_lock held; may drop and retake it.
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr, adv_reg, new_adv = 0;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	/* Keep only the speed and pause bits of the current adv. */
	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
		ADVERTISE_PAUSE_ASYM);

	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv1000_reg;
		u32 new_adv1000 = 0;

		new_adv |= bnx2_phy_get_pause_adv(bp);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		/* Only touch the PHY when the advertisement actually
		 * changed or autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Nothing to renegotiate; just resync the MAC
			 * with the current flow-control resolution.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced mode from here on. */
	if (adv_reg != new_adv)
		bnx2_write_phy(bp, bp->mii_adv, new_adv);

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Double read: BMSR link status is latched-low. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down via loopback so the partner
			 * sees the speed change, then re-read BMSR.
			 */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* If the link was up before the change, assume the
		 * forced settings take effect and program the MAC now
		 * rather than waiting for the next link interrupt.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2163
2164static int
2165bnx2_setup_phy(struct bnx2 *bp, u8 port)
2166__releases(&bp->phy_lock)
2167__acquires(&bp->phy_lock)
2168{
2169 if (bp->loopback == MAC_LOOPBACK)
2170 return 0;
2171
2172 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2173 return bnx2_setup_serdes_phy(bp, port);
2174 }
2175 else {
2176 return bnx2_setup_copper_phy(bp);
2177 }
2178}
2179
/* One-time init of the 5709 SerDes PHY.  The 5709 maps the standard
 * MII registers at an offset of 0x10 and uses a block-address scheme;
 * this sets up the register offsets, selects the AN MMD, forces fiber
 * mode, programs the 2.5G capability bit, and enables BAM/CL73
 * next-page negotiation.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Standard MII registers live at +0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Route register accesses to the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Disable media auto-detect; force 1000X fiber mode. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the board supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable Broadcom autoneg mode (BAM) next pages. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address at the default for later accesses. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2229
/* One-time init of the 5708 SerDes PHY: fiber mode with auto-detect,
 * parallel detection, optional 2.5G advertisement, an early-revision
 * TX driver workaround, and a TX control value from NVRAM HW config
 * for backplane boards.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* TX driver VCM workaround for early 5708 revisions. */
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {

		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	/* Backplane boards carry a TX control override in HW config. */
	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2287
/* One-time init of the 5706 SerDes PHY, with jumbo-frame dependent
 * tuning through raw shadow registers 0x18/0x1c.  NOTE(review): the
 * raw register values are undocumented here — presumably elasticity
 * FIFO / receive tuning for large frames; confirm against the PHY
 * databook.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > ETH_DATA_LEN) {
		u32 val;

		/* Jumbo MTU: read-modify-write shadow regs 0x18/0x1c. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: restore the default settings. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2325
/* One-time init of a copper PHY: optional CRC-fix and early-DAC
 * workarounds, jumbo-frame tuning via raw shadow registers, and
 * enabling of the ethernet@wirespeed (and, on 5709, auto-MDIX)
 * features through the auxiliary control register.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* CRC workaround: scripted writes through the DSP port.
	 * NOTE(review): values taken as-is from vendor init; not
	 * documented here.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC (clear bit 8 of the expansion register). */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > ETH_DATA_LEN) {
		/* Jumbo MTU tuning via shadow regs 0x18 and 0x10. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: restore defaults. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* Enable ethernet@wirespeed (link at lower speed when cabling
	 * cannot sustain the advertised rate).
	 */
	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
	val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;

	/* Auto-MDIX is only enabled on the 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		val |= AUX_CTL_MISC_CTL_AUTOMDIX;

	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
	return 0;
}
2383
2384
/* Full PHY bring-up: set default MII register offsets, enable the
 * link-attention source, read the PHY ID, run the chip-specific init,
 * then apply the current link configuration via bnx2_setup_phy().
 * Remote PHYs skip local init entirely.  Called with bp->phy_lock
 * held; bnx2_setup_phy() may drop and retake it.  Returns 0 or a
 * negative errno from the init/setup helpers.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default (IEEE) register map; 5709 SerDes init overrides. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* Compose the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2430
2431static int
2432bnx2_set_mac_loopback(struct bnx2 *bp)
2433{
2434 u32 mac_mode;
2435
2436 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2437 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2438 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2439 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2440 bp->link_up = 1;
2441 return 0;
2442}
2443
2444static int bnx2_test_link(struct bnx2 *);
2445
/* Put the PHY in internal loopback at 1000/full for self-test, wait up
 * to ~1s for the loopback link to come up, then force the MAC into
 * GMII mode with MAC-loopback and force-link bits cleared.
 * Returns 0 on success or the error from the PHY write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up, 10 tries at 100ms each. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2475
/* Dump management CPU (MCP) registers and shared-memory state to the
 * kernel log for post-mortem debugging (e.g. after a firmware sync
 * timeout in bnx2_fw_sync()).
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	/* The MCP state registers live at different offsets on 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* The program counter is sampled twice, presumably so two
	 * differing values show the MCP is still executing while a
	 * repeated value suggests it is hung — confirm with vendor docs.
	 */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	/* pr_cont continues the previous (newline-less) netdev_err line. */
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2518
/* Send a message to the bootcode firmware through the DRV_MB shared
 * memory mailbox.  A per-device sequence number is folded into the
 * message so the ack can be matched.  If @ack, poll FW_MB for the
 * matching ack sequence; on timeout write a FW_TIMEOUT code back and
 * (unless @silent) dump the MCP state.
 * Returns 0 on success (or when no ack was requested), -EBUSY on ack
 * timeout, -EIO if the firmware reported a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;
	bp->fw_last_msg = msg_data;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* Wait for an acknowledgement from the firmware. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are not further validated beyond the poll above. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware of that fact. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2565
/* Initialize the 5709's host-memory-backed context: trigger the
 * on-chip context-memory init, then program the host page table with
 * the DMA address of every pre-allocated context page, polling for
 * each write request to complete.
 * Returns 0 on success, -EBUSY on a hardware poll timeout, -ENOMEM if
 * a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BNX2_PAGE_BITS - 8) << 16;
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait (up to 10 x 2us) for MEM_INIT to self-clear. */
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Pages must have been allocated earlier; zero each one. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Program the page-table entry: low DMA address + valid
		 * bit, then the high 32 bits, then issue the write request.
		 */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
			/* Poll for the write request to be consumed. */
			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2613
/* Zero the on-chip context memory for all 96 contexts (pre-5709
 * chips, which keep context on-chip rather than in host memory).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
			/* On 5706 A0 some VCIDs map to different physical
			 * CIDs (presumably an A0 erratum): VCIDs with bit 3
			 * set are relocated into the 0x60+ range.
			 */
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context spans CTX_SIZE bytes, written through a
		 * PHY_CTX_SIZE-sized window; zero it page by page.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the whole window, 4 bytes at a time. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2656
/* Quarantine bad RX mbuf clusters: drain the RX buffer pool by
 * allocating every free mbuf, remember the good ones (bit 9 of the
 * returned cluster value clear), then free only the good ones back.
 * The bad mbufs remain allocated and can never be handed out again.
 * Returns 0 on success or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 entries; assumes the pool never yields more good mbufs
	 * than that — TODO confirm against the RBUF pool size.
	 */
	good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
	if (!good_mbuf)
		return -ENOMEM;

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate mbufs until the free count drops to zero, saving
	 * the good ones in the array.
	 */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* Bit 9 set marks a known-bad mbuf; keep only the rest. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the pool, thus discarding all the
	 * bad ones (they stay allocated).
	 */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Free-command encoding: value replicated at bit 9 plus
		 * the low valid bit — presumably the hw free format.
		 */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2705
2706static void
2707bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2708{
2709 u32 val;
2710
2711 val = (mac_addr[0] << 8) | mac_addr[1];
2712
2713 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2714
2715 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2716 (mac_addr[4] << 8) | mac_addr[5];
2717
2718 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2719}
2720
/* Allocate one page for RX page-ring slot @index, DMA-map it for
 * device reads, and write its bus address into the matching rx page
 * buffer descriptor.
 * Returns 0 on success, -ENOMEM if the page allocation fails, -EIO if
 * the DMA mapping fails (the page is freed in that case).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	dma_addr_t mapping;
	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
	struct page *page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;
	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	/* The hardware descriptor takes the 64-bit DMA address split
	 * into high and low 32-bit halves.
	 */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2745
2746static void
2747bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2748{
2749 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2750 struct page *page = rx_pg->page;
2751
2752 if (!page)
2753 return;
2754
2755 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2756 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2757
2758 __free_page(page);
2759 rx_pg->page = NULL;
2760}
2761
/* Allocate and DMA-map a new RX data buffer for ring slot @index, fill
 * in the hardware descriptor, and advance the producer byte sequence.
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO on DMA
 * mapping failure (the buffer is freed in that case).
 */
static inline int
bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	u8 *data;
	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return -ENOMEM;

	/* Only the region starting at the l2 frame header is mapped for
	 * the device; the buffer head is reserved for build_skb().
	 */
	mapping = dma_map_single(&bp->pdev->dev,
				 get_l2_fhdr(data),
				 bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		kfree(data);
		return -EIO;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	/* 64-bit DMA address split into the descriptor's two halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2794
2795static int
2796bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2797{
2798 struct status_block *sblk = bnapi->status_blk.msi;
2799 u32 new_link_state, old_link_state;
2800 int is_set = 1;
2801
2802 new_link_state = sblk->status_attn_bits & event;
2803 old_link_state = sblk->status_attn_bits_ack & event;
2804 if (new_link_state != old_link_state) {
2805 if (new_link_state)
2806 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2807 else
2808 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2809 } else
2810 is_set = 0;
2811
2812 return is_set;
2813}
2814
/* Service PHY-related attention events from NAPI context: a link
 * state change and/or a firmware timer-abort event (used by the
 * remote-PHY link path).  Takes phy_lock around both handlers.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2828
/* Read the hardware TX consumer index from the status block.  When the
 * index lands exactly on BNX2_MAX_TX_DESC_CNT it is stepped past —
 * that slot in each ring page is skipped (presumably it holds the
 * next-page chain descriptor rather than a real buffer descriptor).
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* READ_ONCE: the status block is updated by DMA concurrently. */
	cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);

	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2840
/* Reclaim completed TX descriptors for this napi's ring: unmap each
 * skb's head and fragments, free the skbs, update byte-queue-limit
 * accounting, and wake the tx queue when enough room has been freed.
 * @budget limits the number of packets processed; note that 0 means
 * "no limit" since tx_pkt is incremented before the comparison.
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* Prefetch skb->end to speed up the skb_shinfo() accesses
		 * below.
		 */
		prefetch(&skb->end);

		/* GSO packets can be partially completed: make sure the
		 * whole BD chain (frags + 1, plus the skipped chain slot
		 * if it wraps) has been consumed before reclaiming.
		 */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wrap-around. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap every fragment BD belonging to this skb. */
		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read the hw index only when we have caught up. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Make the tx_cons update visible to the transmit path before
	 * checking netif_tx_queue_stopped(); without the barrier the
	 * xmit path could miss the update and leave the queue stopped
	 * forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock to close the race with xmit. */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2935
/* Recycle @count page-ring entries from the consumer back to the
 * producer side without allocating fresh pages.  If @skb is given, its
 * last page fragment is first returned to the current consumer slot
 * and the skb freed.  Used on error/abort paths in the RX code.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so reclaim that page from the
	 * skb and put it back in the consumer ring slot.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		/* Move the page, its DMA mapping and descriptor address
		 * from the consumer slot to the producer slot (no-op when
		 * they coincide).
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2993
/* Recycle an RX data buffer: hand @data (and, unless cons == prod, its
 * DMA mapping and descriptor address) from the consumer slot back to
 * the producer slot so the hardware can refill it.  Also re-syncs the
 * header area for the device and advances the producer byte sequence.
 */
static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
{
	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
	struct bnx2_rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the header area (which the CPU may have touched) back to
	 * the device before reuse.
	 */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->data = data;

	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
3023
/* Build an skb around a received data buffer.  First refills producer
 * slot; on failure the old buffer (and any page-ring slots the packet
 * used) is recycled and NULL returned.  For split/jumbo packets
 * (@hdr_len != 0), the rest of the frame is attached as page fragments
 * from the page ring, each consumed page being replaced by a fresh one.
 * @ring_idx packs (consumer index << 16) | producer index.
 * @len is the frame length with the 4-byte CRC already subtracted by
 * the caller (hence the "+ 4" arithmetic for on-wire sizes below).
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		/* Also hand back the page-ring entries this frame spans. */
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	/* Point skb->data at the payload, past the l2_fhdr area. */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* frag_size still includes the CRC; stripped below. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* This last page holds only (part of) the
				 * CRC: skip it and trim the extra tail
				 * bytes already accounted into the skb.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet: if the replacement page cannot
			 * be allocated we must recycle this page together
			 * with its existing DMA mapping.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;	/* strip the CRC */

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3128
3129static inline u16
3130bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3131{
3132 u16 cons;
3133
3134 cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3135
3136 if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3137 cons++;
3138 return cons;
3139}
3140
3141static int
3142bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3143{
3144 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3145 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3146 struct l2_fhdr *rx_hdr;
3147 int rx_pkt = 0, pg_ring_used = 0;
3148
3149 if (budget <= 0)
3150 return rx_pkt;
3151
3152 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3153 sw_cons = rxr->rx_cons;
3154 sw_prod = rxr->rx_prod;
3155
3156
3157
3158
3159 rmb();
3160 while (sw_cons != hw_cons) {
3161 unsigned int len, hdr_len;
3162 u32 status;
3163 struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3164 struct sk_buff *skb;
3165 dma_addr_t dma_addr;
3166 u8 *data;
3167 u16 next_ring_idx;
3168
3169 sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3170 sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3171
3172 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3173 data = rx_buf->data;
3174 rx_buf->data = NULL;
3175
3176 rx_hdr = get_l2_fhdr(data);
3177 prefetch(rx_hdr);
3178
3179 dma_addr = dma_unmap_addr(rx_buf, mapping);
3180
3181 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3182 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3183 PCI_DMA_FROMDEVICE);
3184
3185 next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3186 next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3187 prefetch(get_l2_fhdr(next_rx_buf->data));
3188
3189 len = rx_hdr->l2_fhdr_pkt_len;
3190 status = rx_hdr->l2_fhdr_status;
3191
3192 hdr_len = 0;
3193 if (status & L2_FHDR_STATUS_SPLIT) {
3194 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3195 pg_ring_used = 1;
3196 } else if (len > bp->rx_jumbo_thresh) {
3197 hdr_len = bp->rx_jumbo_thresh;
3198 pg_ring_used = 1;
3199 }
3200
3201 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3202 L2_FHDR_ERRORS_PHY_DECODE |
3203 L2_FHDR_ERRORS_ALIGNMENT |
3204 L2_FHDR_ERRORS_TOO_SHORT |
3205 L2_FHDR_ERRORS_GIANT_FRAME))) {
3206
3207 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3208 sw_ring_prod);
3209 if (pg_ring_used) {
3210 int pages;
3211
3212 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3213
3214 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3215 }
3216 goto next_rx;
3217 }
3218
3219 len -= 4;
3220
3221 if (len <= bp->rx_copy_thresh) {
3222 skb = netdev_alloc_skb(bp->dev, len + 6);
3223 if (!skb) {
3224 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3225 sw_ring_prod);
3226 goto next_rx;
3227 }
3228
3229
3230 memcpy(skb->data,
3231 (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3232 len + 6);
3233 skb_reserve(skb, 6);
3234 skb_put(skb, len);
3235
3236 bnx2_reuse_rx_data(bp, rxr, data,
3237 sw_ring_cons, sw_ring_prod);
3238
3239 } else {
3240 skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3241 (sw_ring_cons << 16) | sw_ring_prod);
3242 if (!skb)
3243 goto next_rx;
3244 }
3245 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3246 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3247 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3248
3249 skb->protocol = eth_type_trans(skb, bp->dev);
3250
3251 if (len > (bp->dev->mtu + ETH_HLEN) &&
3252 skb->protocol != htons(0x8100) &&
3253 skb->protocol != htons(ETH_P_8021AD)) {
3254
3255 dev_kfree_skb(skb);
3256 goto next_rx;
3257
3258 }
3259
3260 skb_checksum_none_assert(skb);
3261 if ((bp->dev->features & NETIF_F_RXCSUM) &&
3262 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3263 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3264
3265 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3266 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3267 skb->ip_summed = CHECKSUM_UNNECESSARY;
3268 }
3269 if ((bp->dev->features & NETIF_F_RXHASH) &&
3270 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3271 L2_FHDR_STATUS_USE_RXHASH))
3272 skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3273 PKT_HASH_TYPE_L3);
3274
3275 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3276 napi_gro_receive(&bnapi->napi, skb);
3277 rx_pkt++;
3278
3279next_rx:
3280 sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3281 sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3282
3283 if (rx_pkt == budget)
3284 break;
3285
3286
3287 if (sw_cons == hw_cons) {
3288 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3289 rmb();
3290 }
3291 }
3292 rxr->rx_cons = sw_cons;
3293 rxr->rx_prod = sw_prod;
3294
3295 if (pg_ring_used)
3296 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3297
3298 BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3299
3300 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3301
3302 return rx_pkt;
3303
3304}
3305
3306
3307
3308
/* MSI interrupt handler.  Masks further interrupts via the int-ack
 * command and schedules NAPI.  Always returns IRQ_HANDLED — even while
 * intr_sem is raised (device quiescing) the interrupt was ours.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled (e.g. during reset). */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3328
/* One-shot MSI interrupt handler.  Unlike bnx2_msi() no mask write is
 * issued here — in one-shot mode further interrupts are presumably
 * held off by the hardware until re-armed from the poll path.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled (e.g. during reset). */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3345
/* Legacy INTx interrupt handler (possibly on a shared line).
 * Returns IRQ_NONE when the interrupt was not ours, IRQ_HANDLED
 * otherwise.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt.  So the line is only ours if either the status
	 * index moved or the INTA_VALUE bit says the line is asserted.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to flush the posted write so the interrupt is
	 * really masked before we return.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3384
3385static inline int
3386bnx2_has_fast_work(struct bnx2_napi *bnapi)
3387{
3388 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3389 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3390
3391 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3392 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3393 return 1;
3394 return 0;
3395}
3396
3397#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3398 STATUS_ATTN_BITS_TIMER_ABORT)
3399
/* Return 1 when any work is pending for this napi instance: fast-path
 * rx/tx completions, an un-acked slow-path attention event, or (with
 * CNIC built in) a cnic status-block update.  0 otherwise.
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	/* The cnic driver has work when the status index moved past
	 * the tag it last processed.
	 */
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	/* Attention bits that differ from their ack copies have not
	 * been serviced yet.
	 */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
3419
/* Periodic (timer-driven) check for a missed MSI: if work is pending
 * but the napi status index has not moved since the previous check,
 * assume the MSI was lost, pulse the MSI enable bit off and on, and
 * invoke the MSI handler by hand to restart processing.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* No progress since the last idle check: kick things. */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3441
3442#ifdef BCM_CNIC
/* Forward status-block processing to the cnic (offload) driver, if one
 * is registered.  The returned tag records how far cnic has consumed
 * the status block (compared in bnx2_has_work()).  The ops pointer is
 * RCU-protected against concurrent cnic unregistration.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
3457#endif
3458
/* Handle slow-path attention events (link change, timer abort) from
 * the poll loop, then force an immediate status-block coalesce so the
 * hardware re-DMAs a fresh status block — needed to pick up transient
 * state during link changes.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* Coalesce-now without raising an interrupt; the read
		 * back flushes the posted write.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);
	}
}
3478
/* Do the fast-path work for one poll iteration: reclaim completed tx
 * descriptors (budget 0 = unlimited; tx reclaim is not NAPI-budgeted)
 * and receive packets up to the remaining budget.
 * Returns the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3493
/* NAPI poll routine for the additional MSI-X vectors: fast-path rx/tx
 * only — link and cnic handling stay on vector 0 (see bnx2_poll()).
 * Returns the number of rx packets processed.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		/* Record the status index before re-checking for work so
		 * a concurrent update is not missed.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {
			/* Done: complete NAPI and re-enable this vector's
			 * interrupt, telling the hw how far we got.
			 */
			napi_complete_done(napi, work_done);
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3520
/* Main NAPI poll routine (vector 0 / MSI / INTx): handles slow-path
 * link events, fast-path rx/tx, and cnic forwarding, then re-enables
 * interrupts when all work is done.  Returns rx packets processed.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete_done(napi, work_done);
			/* MSI/MSI-X: a single ack re-enables interrupts. */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			/* INTx: first update the index with the line still
			 * masked, then unmask with a second write.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3569
3570
3571
3572
/* ndo_set_rx_mode hook: program the EMAC RX mode, multicast hash
 * filters, unicast perfect-match slots, and the RPM sort-user0
 * register according to the netdev's flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	/* phy_lock also serializes rx_mode updates in this driver. */
	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep the VLAN tag in the frame when hw tag stripping is off
	 * and the chip supports doing so.
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: saturate every hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s): build the 256-bit
		 * hash filter from the low byte of each address CRC.
		 */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* More unicast addresses than perfect-match slots: fall back
	 * to promiscuous mode.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Program each secondary unicast address into its own
		 * match slot and enable it in the sort register.
		 */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable sorting, program the new mode, then re-enable. */
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3658
3659static int
3660check_fw_section(const struct firmware *fw,
3661 const struct bnx2_fw_file_section *section,
3662 u32 alignment, bool non_empty)
3663{
3664 u32 offset = be32_to_cpu(section->offset);
3665 u32 len = be32_to_cpu(section->len);
3666
3667 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3668 return -EINVAL;
3669 if ((non_empty && len == 0) || len > fw->size - offset ||
3670 len & (alignment - 1))
3671 return -EINVAL;
3672 return 0;
3673}
3674
3675static int
3676check_mips_fw_entry(const struct firmware *fw,
3677 const struct bnx2_mips_fw_file_entry *entry)
3678{
3679 if (check_fw_section(fw, &entry->text, 4, true) ||
3680 check_fw_section(fw, &entry->data, 4, false) ||
3681 check_fw_section(fw, &entry->rodata, 4, false))
3682 return -EINVAL;
3683 return 0;
3684}
3685
3686static void bnx2_release_firmware(struct bnx2 *bp)
3687{
3688 if (bp->rv2p_firmware) {
3689 release_firmware(bp->mips_firmware);
3690 release_firmware(bp->rv2p_firmware);
3691 bp->rv2p_firmware = NULL;
3692 }
3693}
3694
3695static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3696{
3697 const char *mips_fw_file, *rv2p_fw_file;
3698 const struct bnx2_mips_fw_file *mips_fw;
3699 const struct bnx2_rv2p_fw_file *rv2p_fw;
3700 int rc;
3701
3702 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3703 mips_fw_file = FW_MIPS_FILE_09;
3704 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3705 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3706 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3707 else
3708 rv2p_fw_file = FW_RV2P_FILE_09;
3709 } else {
3710 mips_fw_file = FW_MIPS_FILE_06;
3711 rv2p_fw_file = FW_RV2P_FILE_06;
3712 }
3713
3714 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3715 if (rc) {
3716 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3717 goto out;
3718 }
3719
3720 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3721 if (rc) {
3722 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3723 goto err_release_mips_firmware;
3724 }
3725 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3726 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3727 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3728 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3729 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3730 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3731 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3732 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3733 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3734 rc = -EINVAL;
3735 goto err_release_firmware;
3736 }
3737 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3738 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3739 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3740 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3741 rc = -EINVAL;
3742 goto err_release_firmware;
3743 }
3744out:
3745 return rc;
3746
3747err_release_firmware:
3748 release_firmware(bp->rv2p_firmware);
3749 bp->rv2p_firmware = NULL;
3750err_release_mips_firmware:
3751 release_firmware(bp->mips_firmware);
3752 goto out;
3753}
3754
3755static int bnx2_request_firmware(struct bnx2 *bp)
3756{
3757 return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3758}
3759
3760static u32
3761rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3762{
3763 switch (idx) {
3764 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3765 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3766 rv2p_code |= RV2P_BD_PAGE_SIZE;
3767 break;
3768 }
3769 return rv2p_code;
3770}
3771
/* Download one RV2P processor image.  Each 64-bit instruction is
 * staged in the INSTR_HIGH/INSTR_LOW holding registers and committed
 * to instruction RAM by writing its index plus the write command to
 * the per-processor address/command register.  Up to 8 fixup
 * locations from the firmware file are then patched via
 * rv2p_fw_fixup() and re-written, and the processor is reset.
 * Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the write command and address register for this
	 * processor.
	 */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the code 8 bytes (one instruction) at a time. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;	/* instruction index | write cmd */
		BNX2_WR(bp, addr, val);
	}

	/* Apply the fixups: each non-zero entry is a 32-bit word index
	 * into the image; the 64-bit instruction containing that word
	 * is patched and written back at instruction index loc/2.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor; un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3831
3832static int
3833load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3834 const struct bnx2_mips_fw_file_entry *fw_entry)
3835{
3836 u32 addr, len, file_offset;
3837 __be32 *data;
3838 u32 offset;
3839 u32 val;
3840
3841
3842 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3843 val |= cpu_reg->mode_value_halt;
3844 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3845 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3846
3847
3848 addr = be32_to_cpu(fw_entry->text.addr);
3849 len = be32_to_cpu(fw_entry->text.len);
3850 file_offset = be32_to_cpu(fw_entry->text.offset);
3851 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3852
3853 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3854 if (len) {
3855 int j;
3856
3857 for (j = 0; j < (len / 4); j++, offset += 4)
3858 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3859 }
3860
3861
3862 addr = be32_to_cpu(fw_entry->data.addr);
3863 len = be32_to_cpu(fw_entry->data.len);
3864 file_offset = be32_to_cpu(fw_entry->data.offset);
3865 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3866
3867 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3868 if (len) {
3869 int j;
3870
3871 for (j = 0; j < (len / 4); j++, offset += 4)
3872 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3873 }
3874
3875
3876 addr = be32_to_cpu(fw_entry->rodata.addr);
3877 len = be32_to_cpu(fw_entry->rodata.len);
3878 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3879 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3880
3881 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3882 if (len) {
3883 int j;
3884
3885 for (j = 0; j < (len / 4); j++, offset += 4)
3886 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3887 }
3888
3889
3890 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3891
3892 val = be32_to_cpu(fw_entry->start_addr);
3893 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3894
3895
3896 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3897 val &= ~cpu_reg->mode_value_halt;
3898 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3899 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3900
3901 return 0;
3902}
3903
3904static int
3905bnx2_init_cpus(struct bnx2 *bp)
3906{
3907 const struct bnx2_mips_fw_file *mips_fw =
3908 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3909 const struct bnx2_rv2p_fw_file *rv2p_fw =
3910 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3911 int rc;
3912
3913
3914 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3915 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3916
3917
3918 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3919 if (rc)
3920 goto init_cpu_err;
3921
3922
3923 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3924 if (rc)
3925 goto init_cpu_err;
3926
3927
3928 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3929 if (rc)
3930 goto init_cpu_err;
3931
3932
3933 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3934 if (rc)
3935 goto init_cpu_err;
3936
3937
3938 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3939
3940init_cpu_err:
3941 return rc;
3942}
3943
3944static void
3945bnx2_setup_wol(struct bnx2 *bp)
3946{
3947 int i;
3948 u32 val, wol_msg;
3949
3950 if (bp->wol) {
3951 u32 advertising;
3952 u8 autoneg;
3953
3954 autoneg = bp->autoneg;
3955 advertising = bp->advertising;
3956
3957 if (bp->phy_port == PORT_TP) {
3958 bp->autoneg = AUTONEG_SPEED;
3959 bp->advertising = ADVERTISED_10baseT_Half |
3960 ADVERTISED_10baseT_Full |
3961 ADVERTISED_100baseT_Half |
3962 ADVERTISED_100baseT_Full |
3963 ADVERTISED_Autoneg;
3964 }
3965
3966 spin_lock_bh(&bp->phy_lock);
3967 bnx2_setup_phy(bp, bp->phy_port);
3968 spin_unlock_bh(&bp->phy_lock);
3969
3970 bp->autoneg = autoneg;
3971 bp->advertising = advertising;
3972
3973 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3974
3975 val = BNX2_RD(bp, BNX2_EMAC_MODE);
3976
3977
3978 val &= ~BNX2_EMAC_MODE_PORT;
3979 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3980 BNX2_EMAC_MODE_ACPI_RCVD |
3981 BNX2_EMAC_MODE_MPKT;
3982 if (bp->phy_port == PORT_TP) {
3983 val |= BNX2_EMAC_MODE_PORT_MII;
3984 } else {
3985 val |= BNX2_EMAC_MODE_PORT_GMII;
3986 if (bp->line_speed == SPEED_2500)
3987 val |= BNX2_EMAC_MODE_25G_MODE;
3988 }
3989
3990 BNX2_WR(bp, BNX2_EMAC_MODE, val);
3991
3992
3993 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3994 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3995 0xffffffff);
3996 }
3997 BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3998
3999 val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
4000 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
4001 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
4002 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
4003
4004
4005 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4006 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4007 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4008 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4009
4010 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4011 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4012 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4013
4014 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4015 } else {
4016 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4017 }
4018
4019 if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4020 u32 val;
4021
4022 wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4023 if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4024 bnx2_fw_sync(bp, wol_msg, 1, 0);
4025 return;
4026 }
4027
4028
4029
4030 val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4031 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4032 val | BNX2_PORT_FEATURE_ASF_ENABLED);
4033 bnx2_fw_sync(bp, wol_msg, 1, 0);