// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, 0644);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");

/*
 * Global variables
 */
static u32 bnad_rxqs_per_cq = 2;
static atomic_t bna_id;
static const u8 bnad_bcast_addr[] __aligned(2) =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/*
 * Local MACROS
 */
#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len = (_size);		\
} while (0)

/*
 * Reinitialize completions in CQ, once Rx is taken down.
 */
static void
bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl;
	int i;

	for (i = 0; i < ccb->q_depth; i++) {
		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
		cmpl->valid = 0;
	}
}

/* Tx Datapath functions */

/* Caller should ensure that the entry at unmap_q[index] is valid */
static u32
bnad_tx_buff_unmap(struct bnad *bnad,
		   struct bnad_tx_unmap *unmap_q,
		   u32 q_depth, u32 index)
{
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;
	int vector, nvecs;

	unmap = &unmap_q[index];
	nvecs = unmap->nvecs;

	skb = unmap->skb;
	unmap->skb = NULL;
	unmap->nvecs = 0;
	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vectors[0], dma_addr),
			 skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
	nvecs--;

	vector = 0;
	while (nvecs) {
		vector++;
		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
			vector = 0;
			BNA_QE_INDX_INC(index, q_depth);
			unmap = &unmap_q[index];
		}

		dma_unmap_page(&bnad->pcidev->dev,
			       dma_unmap_addr(&unmap->vectors[vector], dma_addr),
			       dma_unmap_len(&unmap->vectors[vector], dma_len),
			       DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
		nvecs--;
	}

	BNA_QE_INDX_INC(index, q_depth);

	return index;
}

/*
 * Frees all pending Tx Bufs.
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < tcb->q_depth; i++) {
		skb = unmap_q[i].skb;
		if (!skb)
			continue;
		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);

		dev_kfree_skb_any(skb);
	}
}

/*
 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 */
static u32
bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
{
	u32 sent_packets = 0, sent_bytes = 0;
	u32 wis, unmap_wis, hw_cons, cons, q_depth;
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;

	/* Just return if TX is stopped */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	hw_cons = *(tcb->hw_consumer_index);
	rmb();
	cons = tcb->consumer_index;
	q_depth = tcb->q_depth;

	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	while (wis) {
		unmap = &unmap_q[cons];

		skb = unmap->skb;

		sent_packets++;
		sent_bytes += skb->len;

		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
		wis -= unmap_wis;

		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = hw_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}

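/*
 * bnad_tx_complete : Reaps Tx completions and re-wakes the netdev queue
 * once enough entries are free. The BNAD_TXQ_FREE_SENT bit serializes
 * concurrent callers (ISR vs. cleanup).
 */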
static u32
bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_txcmpl_process(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
		    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_atomic();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx_complete(bnad, tcb);

	return IRQ_HANDLED;
}

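/* Reset the unmap queue's buffer-allocation parameters */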
static inline void
bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->reuse_pi = -1;
	unmap_q->alloc_order = -1;
	unmap_q->map_size = 0;
	unmap_q->type = BNAD_RXBUF_NONE;
}

/* Default is page-based allocation. Multi-buffer support - TBD */
static int
bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int order;

	bnad_rxq_alloc_uninit(bnad, rcb);

	order = get_order(rcb->rxq->buffer_size);

	unmap_q->type = BNAD_RXBUF_PAGE;

	if (bna_is_small_rxq(rcb->id)) {
		unmap_q->alloc_order = 0;
		unmap_q->map_size = rcb->rxq->buffer_size;
	} else {
		if (rcb->rxq->multi_buffer) {
			unmap_q->alloc_order = 0;
			unmap_q->map_size = rcb->rxq->buffer_size;
			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
		} else {
			unmap_q->alloc_order = order;
			unmap_q->map_size =
				(rcb->rxq->buffer_size > 2048) ?
				PAGE_SIZE << order : 2048;
		}
	}

	BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);

	return 0;
}

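/* Unmap and release one page-based Rx buffer */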
static inline void
bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->page)
		return;

	dma_unmap_page(&bnad->pcidev->dev,
		       dma_unmap_addr(&unmap->vector, dma_addr),
		       unmap->vector.len, DMA_FROM_DEVICE);
	put_page(unmap->page);
	unmap->page = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}

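/* Unmap and free one skb-based Rx buffer */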
static inline void
bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->skb)
		return;

	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vector, dma_addr),
			 unmap->vector.len, DMA_FROM_DEVICE);
	dev_kfree_skb_any(unmap->skb);
	unmap->skb = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}

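/* Free every posted Rx buffer on the queue and reset its unmap queue */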
static void
bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int i;

	for (i = 0; i < rcb->q_depth; i++) {
		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_rxq_cleanup_skb(bnad, unmap);
		else
			bnad_rxq_cleanup_page(bnad, unmap);
	}
	bnad_rxq_alloc_uninit(bnad, rcb);
}

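/*
 * Allocate, DMA-map and post page-based Rx buffers. Partially consumed
 * pages are reused via reuse_pi; the doorbell is rung only for entries
 * that were actually posted.
 */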
static u32
bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap, *prev;
	struct bna_rxq_entry *rxent;
	struct page *page;
	u32 page_offset, alloc_size;
	dma_addr_t dma_addr;

	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloc_size = PAGE_SIZE << unmap_q->alloc_order;
	alloced = 0;

	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		if (unmap_q->reuse_pi < 0) {
			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					   unmap_q->alloc_order);
			page_offset = 0;
		} else {
			prev = &unmap_q->unmap[unmap_q->reuse_pi];
			page = prev->page;
			page_offset = prev->page_offset + unmap_q->map_size;
			get_page(page);
		}

		if (unlikely(!page)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
					unmap_q->map_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			put_page(page);
			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
			rcb->rxq->rxbuf_map_failed++;
			goto finishing;
		}

		unmap->page = page;
		unmap->page_offset = page_offset;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = unmap_q->map_size;
		page_offset += unmap_q->map_size;

		if (page_offset < alloc_size)
			unmap_q->reuse_pi = prod;
		else
			unmap_q->reuse_pi = -1;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}

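/* Allocate, DMA-map and post skb-based Rx buffers */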
static u32
bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth, buff_sz;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	buff_sz = rcb->rxq->buffer_size;
	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloced = 0;
	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);

		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  buff_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
			rcb->rxq->rxbuf_map_failed++;
			goto finishing;
		}

		unmap->skb = skb;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = buff_sz;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}

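/*
 * Replenish an RxQ, but only once at least
 * (1 << BNAD_RXQ_REFILL_THRESHOLD_SHIFT) entries are free.
 */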
static inline void
bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	u32 to_alloc;

	to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
		return;

	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
	else
		bnad_rxq_refill_page(bnad, rcb, to_alloc);
}

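/*
 * Completion-entry flag combinations for which the hardware has
 * validated both the L3 and L4 checksums.
 */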
#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
			       BNA_CQ_EF_IPV6 | \
			       BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
			       BNA_CQ_EF_L4_CKSUM_OK)

#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
		    BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
		    BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
		    BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
		    BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)

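/* Drop a received frame: unmap and free every buffer it spans */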
static void
bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
		    u32 sop_ci, u32 nvecs)
{
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap;
	u32 ci, vec;

	unmap_q = rcb->unmap_q;
	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_rxq_cleanup_skb(bnad, unmap);
		else
			bnad_rxq_cleanup_page(bnad, unmap);
	}
}

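/*
 * Attach the received page fragments to the skb obtained from
 * napi_get_frags(), reading per-fragment lengths from the
 * completion entries.
 */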
static void
bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
{
	struct bna_rcb *rcb;
	struct bnad *bnad;
	struct bnad_rx_unmap_q *unmap_q;
	struct bna_cq_entry *cq, *cmpl;
	u32 ci, pi, totlen = 0;

	cq = ccb->sw_q;
	pi = ccb->producer_index;
	cmpl = &cq[pi];

	rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
	unmap_q = rcb->unmap_q;
	bnad = rcb->bnad;
	ci = rcb->consumer_index;

	/* prefetch header */
	prefetch(page_address(unmap_q->unmap[ci].page) +
		 unmap_q->unmap[ci].page_offset);

	while (nvecs--) {
		struct bnad_rx_unmap *unmap;
		u32 len;

		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		dma_unmap_page(&bnad->pcidev->dev,
			       dma_unmap_addr(&unmap->vector, dma_addr),
			       unmap->vector.len, DMA_FROM_DEVICE);

		len = ntohs(cmpl->length);
		skb->truesize += unmap->vector.len;
		totlen += len;

		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   unmap->page, unmap->page_offset, len);

		unmap->page = NULL;
		unmap->vector.len = 0;

		BNA_QE_INDX_INC(pi, ccb->q_depth);
		cmpl = &cq[pi];
	}

	skb->len += totlen;
	skb->data_len += totlen;
}

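/* Unmap a single-buffer receive and finish initializing its skb */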
static inline void
bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
		  struct bnad_rx_unmap *unmap, u32 len)
{
	prefetch(skb->data);

	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vector, dma_addr),
			 unmap->vector.len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, bnad->netdev);

	unmap->skb = NULL;
	unmap->vector.len = 0;
}

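/*
 * bnad_cq_process : Rx completion handler. Consumes up to @budget
 * completions, builds single- or multi-buffer skbs, applies checksum
 * and VLAN offload results, and feeds the packets to the stack.
 */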
static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap = NULL;
	struct sk_buff *skb = NULL;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
	u32 packets = 0, len = 0, totlen = 0;
	u32 pi, vec, sop_ci = 0, nvecs = 0;
	u32 flags, masked_flags;

	prefetch(bnad->netdev);

	cq = ccb->sw_q;

	while (packets < budget) {
		cmpl = &cq[ccb->producer_index];
		if (!cmpl->valid)
			break;
		/*
		 * The 'valid' bit is set by the adapter only after writing
		 * the other fields of the completion entry. Hence, do not
		 * load other fields of the completion entry *before*
		 * 'valid' is loaded. The rmb() here prevents the compiler
		 * and/or CPU from reordering the reads, which could
		 * otherwise observe stale values in the completion entry.
		 */
		rmb();

		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;

		/* start of packet ci */
		sop_ci = rcb->consumer_index;

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
			unmap = &unmap_q->unmap[sop_ci];
			skb = unmap->skb;
		} else {
			skb = napi_get_frags(&rx_ctrl->napi);
			if (unlikely(!skb))
				break;
		}
		prefetch(skb);

		flags = ntohl(cmpl->flags);
		len = ntohs(cmpl->length);
		totlen = len;
		nvecs = 1;

		/* Check all the completions of this frame.
		 * busy-wait doesn't help much, break here.
		 */
		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
		    (flags & BNA_CQ_EF_EOP) == 0) {
			pi = ccb->producer_index;
			do {
				BNA_QE_INDX_INC(pi, ccb->q_depth);
				next_cmpl = &cq[pi];

				if (!next_cmpl->valid)
					break;
				/*
				 * As above: 'valid' is written by the
				 * adapter only after the other fields of
				 * the completion entry. The rmb() keeps the
				 * length/flags reads from being reordered
				 * ahead of the 'valid' check, which could
				 * otherwise return stale values.
				 */
				rmb();

				len = ntohs(next_cmpl->length);
				flags = ntohl(next_cmpl->flags);

				nvecs++;
				totlen += len;
			} while ((flags & BNA_CQ_EF_EOP) == 0);

			if (!next_cmpl->valid)
				break;
		}
		packets++;

		/* TODO: BNA_CQ_EF_LOCAL ? */
		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
				      BNA_CQ_EF_FCS_ERROR |
				      BNA_CQ_EF_TOO_LONG))) {
			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
			rcb->rxq->rx_packets_with_error++;

			goto next;
		}

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_cq_setup_skb(bnad, skb, unmap, len);
		else
			bnad_cq_setup_skb_frags(ccb, skb, nvecs);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += totlen;
		ccb->bytes_per_intr += totlen;

		masked_flags = flags & flags_cksum_prot_mask;

		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     ((masked_flags == flags_tcp4) ||
		      (masked_flags == flags_udp4) ||
		      (masked_flags == flags_tcp6) ||
		      (masked_flags == flags_udp6))))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		if ((flags & BNA_CQ_EF_VLAN) &&
		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(cmpl->vlan_tag));

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			netif_receive_skb(skb);
		else
			napi_gro_frags(&rx_ctrl->napi);

next:
		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
		for (vec = 0; vec < nvecs; vec++) {
			cmpl = &cq[ccb->producer_index];
			cmpl->valid = 0;
			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
		}
	}

	napi_gro_flush(&rx_ctrl->napi, false);
	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_rxq_post(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_rxq_post(bnad, ccb->rcb[1]);

	return packets;
}

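/* Schedule NAPI processing of a CQ from interrupt context */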
static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}

/* MSIX Rx Completion Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_HANDLED;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held because of cfg_flags access
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
	if (is_zero_ether_addr(netdev->dev_addr))
		ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}

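/* Propagate link state changes reported by the FW up to the net stack */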
void
bnad_cb_ethport_link_status(struct bnad *bnad,
			    enum bna_link_status link_status)
{
	bool link_up = false;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;

			netdev_info(bnad->netdev, "link up\n");
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				     tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule
						 */
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			netdev_info(bnad->netdev, "link down\n");
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tcb->priv = tcb;
	tx_info->tcb[tcb->id] = tcb;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tx_info->tcb[tcb->id] = NULL;
	tcb->priv = NULL;
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
	}
}

static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		BUG_ON(*(tcb->hw_consumer_index) != 0);

		if (netif_carrier_ok(bnad->netdev)) {
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround for first ioceth enable failure & we
	 * get a 0 MAC address. We try to get the MAC address
	 * again here.
	 */
	if (is_zero_ether_addr(bnad->perm_addr)) {
		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}

/*
 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
 */
static void
bnad_tx_cleanup(struct delayed_work *work)
{
	struct bnad_tx_info *tx_info =
		container_of(work, struct bnad_tx_info, tx_cleanup_work);
	struct bnad *bnad = NULL;
	struct bna_tcb *tcb;
	unsigned long flags;
	u32 i, pending = 0;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;

		bnad = tcb->bnad;

		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			pending++;
			continue;
		}

		bnad_txq_cleanup(bnad, tcb);

		smp_mb__before_atomic();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
	}

	if (pending) {
		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
				   msecs_to_jiffies(1));
		return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_cleanup_complete(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
	}

	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
}

static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
	}
}

/*
 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
 */
static void
bnad_rx_cleanup(void *work)
{
	struct bnad_rx_info *rx_info =
		container_of(work, struct bnad_rx_info, rx_cleanup_work);
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad *bnad = NULL;
	unsigned long flags;
	u32 i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];

		if (!rx_ctrl->ccb)
			continue;

		bnad = rx_ctrl->ccb->bnad;

		/*
		 * Wait till the poll handler has exited
		 * and nothing can be scheduled anymore
		 */
		napi_disable(&rx_ctrl->napi);

		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
		if (rx_ctrl->ccb->rcb[1])
			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_cleanup_complete(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
	}

	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
}

static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bna_rcb *rcb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		napi_enable(&rx_ctrl->napi);

		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
			rcb = ccb->rcb[j];
			if (!rcb)
				continue;

			bnad_rxq_alloc_init(bnad, rcb);
			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
			bnad_rxq_post(bnad, rcb);
		}
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		  struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
	    !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mtu_comp);
}

void
bnad_cb_completion(void *arg, enum bfa_status status)
{
	struct bnad_iocmd_comp *iocmd_comp =
			(struct bnad_iocmd_comp *)arg;

	iocmd_comp->comp_status = (u32) status;
	complete(&iocmd_comp->comp);
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						 dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
						   GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
						       GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);
}

/*
 * Allocates IRQ for Mailbox, but keeps it disabled.
 * It will be enabled once we get the mbox enable callback
 * from bna.
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
	int err = 0;
	unsigned long irq_flags, flags;
	u32 irq;
	irq_handler_t irq_handler;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
		irq_flags = 0;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
				       (bnad->num_tx * bnad->num_txq_per_tx) +
				       txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}

/* NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/* NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), associated with the Tx object
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
		      u32 tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
			tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}

/* NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), associated with the Rx object
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
		      u32 rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}

/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  u32 tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					     &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
						  &res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					     &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
						  &res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}

/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(struct timer_list *t)
{
	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(struct timer_list *t)
{
	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(struct timer_list *t)
{
	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(struct timer_list *t)
{
	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines take bnad->bna_lock before touching shared state,
 * since they run asynchronously to the rest of the driver.
 */

/*
 * b) Dynamic Interrupt Moderation Timer
 * Periodically lets BNA re-evaluate the Rx interrupt coalescing
 * settings of every CCB, based on the observed packet rate.
 */
static void
bnad_dim_timeout(struct timer_list *t)
{
	struct bnad *bnad = from_timer(bnad, t, dim_timer);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_RF_DIM_TIMER_RUNNING, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(struct timer_list *t)
{
	struct bnad *bnad = from_timer(bnad, t, stats_timer);
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
	    !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}

/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
		i++;
	}
}

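/* NAPI poll routine for one Rx completion queue */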
static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bnad *bnad = rx_ctrl->bnad;
	int rcvd = 0;

	rx_ctrl->rx_poll_ctr++;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
	if (rcvd >= budget)
		return rcvd;

poll_exit:
	napi_complete_done(napi, rcvd);

	rx_ctrl->rx_complete++;

	if (rx_ctrl->ccb)
		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);

	return rcvd;
}

#define BNAD_NAPI_POLL_QUOTA		64
static void
bnad_napi_add(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
	}
}

static void
bnad_napi_delete(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable & then delete */
	for (i = 0; i < bnad->num_rxp_per_rx; i++)
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
}

/* Should be called with conf_lock held */
void
bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
					bnad->num_txq_per_tx);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;
	tx_info->tx_id = 0;

	bnad_tx_res_free(bnad, res_info);
}

/* Should be called with conf_lock held */
int
bnad_setup_tx(struct bnad *bnad, u32 tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	static const struct bna_tx_event_cbfn tx_cbfn = {
		.tcb_setup_cbfn = bnad_cb_tcb_setup,
		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
		.tx_stall_cbfn = bnad_cb_tx_stall,
		.tx_resume_cbfn = bnad_cb_tx_resume,
		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
	};

	struct bna_tx *tx;
	unsigned long flags;

	tx_info->tx_id = tx_id;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;
	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		       bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
				 bnad->num_txq_per_tx,
				 (sizeof(struct bnad_tx_unmap) *
				  bnad->txq_depth));

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
	if (err)
		return err;

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			   tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!tx) {
		err = -ENOMEM;
		goto err_return;
	}
	tx_info->tx = tx;

	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
			  (work_func_t)bnad_tx_cleanup);

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
					    tx_id, bnad->num_txq_per_tx);
		if (err)
			goto cleanup_tx;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

cleanup_tx:
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	tx_info->tx = NULL;
	tx_info->tx_id = 0;
err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
	memset(rx_config, 0, sizeof(*rx_config));
	rx_config->rx_type = BNA_RX_T_REGULAR;
	rx_config->num_paths = bnad->num_rxp_per_rx;
	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;

	if (bnad->num_rxp_per_rx > 1) {
		rx_config->rss_status = BNA_STATUS_T_ENABLED;
		rx_config->rss_config.hash_type =
			(BFI_ENET_RSS_IPV6 |
			 BFI_ENET_RSS_IPV6_TCP |
			 BFI_ENET_RSS_IPV4 |
			 BFI_ENET_RSS_IPV4_TCP);
		rx_config->rss_config.hash_mask =
			bnad->num_rxp_per_rx - 1;
		netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
			sizeof(rx_config->rss_config.toeplitz_hash_key));
	} else {
		rx_config->rss_status = BNA_STATUS_T_DISABLED;
		memset(&rx_config->rss_config, 0,
		       sizeof(rx_config->rss_config));
	}

	rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
	rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;

	/* BNA_RXP_SINGLE - one data-buffer queue
	 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
	 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
	 */

	/* TODO: configurable param for queue type */
	rx_config->rxp_type = BNA_RXP_SLR;

	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
	    rx_config->frame_size > 4096) {
		/*
		 * though size_routing_enable is set in SLR,
		 * small packets may get routed to same rxq.
		 * set buf_size to 2048 instead of PAGE_SIZE.
		 */
		rx_config->q0_buf_size = 2048;
		/* this should be in multiples of 2 */
		rx_config->q0_num_vecs = 4;
		rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
		rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
	} else {
		rx_config->q0_buf_size = rx_config->frame_size;
		rx_config->q0_num_vecs = 1;
		rx_config->q0_depth = bnad->rxq_depth;
	}

	/* initialize for q1 for BNA_RXP_SLR */
	if (rx_config->rxp_type == BNA_RXP_SLR) {
		rx_config->q1_depth = bnad->rxq_depth;
		rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
	}

	rx_config->vlan_strip_status =
		(bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
		BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
}

static void
bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	int i;

	for (i = 0; i < bnad->num_rxp_per_rx; i++)
		rx_info->rx_ctrl[i].bnad = bnad;
}

/* Called with mutex_lock(&bnad->conf_mutex) held */
static u32
bnad_reinit_rx(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;
	u32 err = 0, current_err = 0;
	u32 rx_id = 0, count = 0;
	unsigned long flags;

	/* destroy and create new rx objects */
	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
		if (!bnad->rx_info[rx_id].rx)
			continue;
		bnad_destroy_rx(bnad, rx_id);
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet,
			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
		count++;
		current_err = bnad_setup_rx(bnad, rx_id);
		if (current_err && !err) {
			err = current_err;
			netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
		}
	}

	/* restore rx configuration */
	if (bnad->rx_info[0].rx && !err) {
		bnad_restore_vlans(bnad, 0);
		bnad_enable_default_bcast(bnad);
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		bnad_set_rx_mode(netdev);
	}

	return count;
}

/* Called with bnad_conf_lock() held */
void
bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	unsigned long flags;
	int to_del = 0;

	if (!rx_info->rx)
		return;

	if (0 == rx_id) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
		    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
			to_del = 1;
		}
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		if (to_del)
			del_timer_sync(&bnad->dim_timer);
	}

	init_completion(&bnad->bnad_completions.rx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.rx_comp);

	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

	bnad_napi_delete(bnad, rx_id);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_destroy(rx_info->rx);

	rx_info->rx = NULL;
	rx_info->rx_id = 0;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_rx_res_free(bnad, res_info);
}

/* Called with mutex_lock(&bnad->conf_mutex) held */
int
bnad_setup_rx(struct bnad *bnad, u32 rx_id)
{
	int err;
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	static const struct bna_rx_event_cbfn rx_cbfn = {
		.rcb_setup_cbfn = NULL,
		.rcb_destroy_cbfn = NULL,
		.ccb_setup_cbfn = bnad_cb_ccb_setup,
		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
		.rx_stall_cbfn = bnad_cb_rx_stall,
		.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
		.rx_post_cbfn = bnad_cb_rx_post,
	};
	struct bna_rx *rx;
	unsigned long flags;

	rx_info->rx_id = rx_id;

	/* Initialize the Rx object configuration */
	bnad_init_rx_config(bnad, rx_config);

	/* Get BNA's resource requirement for one Rx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_res_req(rx_config, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
				 rx_config->num_paths,
				 (rx_config->q0_depth *
				  sizeof(struct bnad_rx_unmap)) +
				  sizeof(struct bnad_rx_unmap_q));

	if (rx_config->rxp_type != BNA_RXP_SINGLE) {
		BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
					 rx_config->num_paths,
					 (rx_config->q1_depth *
					  sizeof(struct bnad_rx_unmap) +
					  sizeof(struct bnad_rx_unmap_q)));
	}

	/* Allocate resource */
	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
	if (err)
		return err;

	bnad_rx_ctrl_init(bnad, rx_id);

	/* Ask BNA to create one Rx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
			   rx_info);
	if (!rx) {
		err = -ENOMEM;
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto err_return;
	}
	rx_info->rx = rx;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	INIT_WORK(&rx_info->rx_cleanup_work,
		  (work_func_t)(bnad_rx_cleanup));

	/*
	 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
	 * so that IRQ handler cannot schedule NAPI at this point.
	 */
	bnad_napi_add(bnad, rx_id);

	/* Register ISR for the Rx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
					    rx_config->num_paths);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (0 == rx_id) {
		/* Set up Dynamic Interrupt Moderation Vector */
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);

		/* Enable VLAN filtering only on the default Rx */
		bna_rx_vlanfilter_enable(rx);

		/* Start the DIM timer */
		bnad_dim_timer_start(bnad);
	}

	bna_rx_enable(rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_destroy_rx(bnad, rx_id);
	return err;
}

/* Called with conf_lock & bnad->bna_lock held */
2294void
2295bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2296{
2297 struct bnad_tx_info *tx_info;
2298
2299 tx_info = &bnad->tx_info[0];
2300 if (!tx_info->tx)
2301 return;
2302
2303 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2304}

/* Called with conf_mutex & bnad->bna_lock held */
2307void
2308bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2309{
2310 struct bnad_rx_info *rx_info;
2311 int i;
2312
2313 for (i = 0; i < bnad->num_rx; i++) {
2314 rx_info = &bnad->rx_info[i];
2315 if (!rx_info->rx)
2316 continue;
2317 bna_rx_coalescing_timeo_set(rx_info->rx,
2318 bnad->rx_coalescing_timeo);
2319 }
2320}

/*
 * Called with bnad->bna_lock held
 */
2325int
2326bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2327{
2328 int ret;
2329
2330 if (!is_valid_ether_addr(mac_addr))
2331 return -EADDRNOTAVAIL;

	/* If the datapath is down, pretend everything went through */
2334 if (!bnad->rx_info[0].rx)
2335 return 0;
2336
2337 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2338 if (ret != BNA_CB_SUCCESS)
2339 return -EADDRNOTAVAIL;
2340
2341 return 0;
2342}

/* Should be called with conf_mutex held */
2345int
2346bnad_enable_default_bcast(struct bnad *bnad)
2347{
2348 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2349 int ret;
2350 unsigned long flags;
2351
2352 init_completion(&bnad->bnad_completions.mcast_comp);
2353
2354 spin_lock_irqsave(&bnad->bna_lock, flags);
2355 ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2356 bnad_cb_rx_mcast_add);
2357 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2358
2359 if (ret == BNA_CB_SUCCESS)
2360 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2361 else
2362 return -ENODEV;
2363
2364 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2365 return -ENODEV;
2366
2367 return 0;
2368}

/* Called with mutex_lock(&bnad->conf_mutex) held */
2371void
2372bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2373{
2374 u16 vid;
2375 unsigned long flags;
2376
2377 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2378 spin_lock_irqsave(&bnad->bna_lock, flags);
2379 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2380 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2381 }
2382}

/* Statistics utilities */
2385void
2386bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2387{
2388 int i, j;
2389
2390 for (i = 0; i < bnad->num_rx; i++) {
2391 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2392 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2393 stats->rx_packets += bnad->rx_info[i].
2394 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2395 stats->rx_bytes += bnad->rx_info[i].
2396 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2397 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2398 bnad->rx_info[i].rx_ctrl[j].ccb->
2399 rcb[1]->rxq) {
2400 stats->rx_packets +=
2401 bnad->rx_info[i].rx_ctrl[j].
2402 ccb->rcb[1]->rxq->rx_packets;
2403 stats->rx_bytes +=
2404 bnad->rx_info[i].rx_ctrl[j].
2405 ccb->rcb[1]->rxq->rx_bytes;
2406 }
2407 }
2408 }
2409 }
2410 for (i = 0; i < bnad->num_tx; i++) {
2411 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2412 if (bnad->tx_info[i].tcb[j]) {
2413 stats->tx_packets +=
2414 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2415 stats->tx_bytes +=
2416 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2417 }
2418 }
2419 }
2420}

/*
 * Must be called with the bna_lock held.
 */
2425void
2426bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2427{
2428 struct bfi_enet_stats_mac *mac_stats;
2429 u32 bmap;
2430 int i;
2431
2432 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2433 stats->rx_errors =
2434 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2435 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2436 mac_stats->rx_undersize;
2437 stats->tx_errors = mac_stats->tx_fcs_error +
2438 mac_stats->tx_undersize;
2439 stats->rx_dropped = mac_stats->rx_drop;
2440 stats->tx_dropped = mac_stats->tx_drop;
2441 stats->multicast = mac_stats->rx_multicast;
2442 stats->collisions = mac_stats->tx_total_collision;
2443
2444 stats->rx_length_errors = mac_stats->rx_frame_length_error;

	/* rx_over_errors (receive ring buffer overflow) is not reported by hw */

2448 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2449 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2450
2451 bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1)
			stats->rx_fifo_errors +=
				bnad->stats.bna_stats->
					hw_stats.rxf_stats[i].frame_drops;
		bmap >>= 1;
	}
2461}
2462
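/* Make sure any in-flight mailbox interrupt handler has completed */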
2463static void
2464bnad_mbox_irq_sync(struct bnad *bnad)
2465{
2466 u32 irq;
2467 unsigned long flags;
2468
2469 spin_lock_irqsave(&bnad->bna_lock, flags);
2470 if (bnad->cfg_flags & BNAD_CF_MSIX)
2471 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2472 else
2473 irq = bnad->pcidev->irq;
2474 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2475
2476 synchronize_irq(irq);
2477}

/* Prepare an skb for TSO; returns 0 on success, a negative errno otherwise */
2480static int
2481bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2482{
2483 int err;
2484
2485 err = skb_cow_head(skb, 0);
2486 if (err < 0) {
2487 BNAD_UPDATE_CTR(bnad, tso_err);
2488 return err;
2489 }

	/*
	 * For TSO, the TCP checksum field is seeded with the pseudo-header
	 * checksum computed with a zero length, so the hardware can insert
	 * the proper per-segment length.
	 */
2495 if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2496 struct iphdr *iph = ip_hdr(skb);

		/* These fields are rewritten by the hardware for each segment */
2499 iph->tot_len = 0;
2500 iph->check = 0;
2501
2502 tcp_hdr(skb)->check =
2503 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2504 IPPROTO_TCP, 0);
2505 BNAD_UPDATE_CTR(bnad, tso4);
2506 } else {
2507 tcp_v6_gso_csum_prep(skb);
2508 BNAD_UPDATE_CTR(bnad, tso6);
2509 }
2510
2511 return 0;
2512}

/*
 * Initialize Q numbers depending on Rx Paths
 * Called with bnad->bna_lock held, because of cfg_flags
 * access.
 */
2519static void
2520bnad_q_num_init(struct bnad *bnad)
2521{
2522 int rxps;
2523
2524 rxps = min((uint)num_online_cpus(),
2525 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2526
2527 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2528 rxps = 1;
2529
2530 bnad->num_rx = 1;
2531 bnad->num_tx = 1;
2532 bnad->num_rxp_per_rx = rxps;
2533 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2534}

/*
 * Adjust the Q numbers, given a number of MSI-X vectors.
 * Give preference to RSS as opposed to Tx.
 */
2542static void
2543bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2544{
2545 bnad->num_txq_per_tx = 1;
2546 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2547 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2548 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2549 bnad->num_rxp_per_rx = msix_vectors -
2550 (bnad->num_tx * bnad->num_txq_per_tx) -
2551 BNAD_MAILBOX_MSIX_VECTORS;
2552 } else
2553 bnad->num_rxp_per_rx = 1;
2554}

/* Enable / disable ioceth */
2557static int
2558bnad_ioceth_disable(struct bnad *bnad)
2559{
2560 unsigned long flags;
2561 int err = 0;
2562
2563 spin_lock_irqsave(&bnad->bna_lock, flags);
2564 init_completion(&bnad->bnad_completions.ioc_comp);
2565 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2566 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2567
2568 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2569 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2570
2571 err = bnad->bnad_completions.ioc_comp_status;
2572 return err;
2573}
2574
2575static int
2576bnad_ioceth_enable(struct bnad *bnad)
2577{
2578 int err = 0;
2579 unsigned long flags;
2580
2581 spin_lock_irqsave(&bnad->bna_lock, flags);
2582 init_completion(&bnad->bnad_completions.ioc_comp);
2583 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2584 bna_ioceth_enable(&bnad->bna.ioceth);
2585 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2586
2587 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2588 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2589
2590 err = bnad->bnad_completions.ioc_comp_status;
2591
2592 return err;
2593}

/* Free BNA resources */
2596static void
2597bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2598 u32 res_val_max)
2599{
2600 int i;
2601
2602 for (i = 0; i < res_val_max; i++)
2603 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2604}

/* Allocates memory and interrupt resources for BNA */
2607static int
2608bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2609 u32 res_val_max)
2610{
2611 int i, err;
2612
2613 for (i = 0; i < res_val_max; i++) {
2614 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2615 if (err)
2616 goto err_return;
2617 }
2618 return 0;
2619
2620err_return:
2621 bnad_res_free(bnad, res_info, res_val_max);
2622 return err;
2623}

/* Enable MSI-X; fall back to INTx mode on failure */
2626static void
2627bnad_enable_msix(struct bnad *bnad)
2628{
2629 int i, ret;
2630 unsigned long flags;
2631
2632 spin_lock_irqsave(&bnad->bna_lock, flags);
2633 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2634 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2635 return;
2636 }
2637 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2638
2639 if (bnad->msix_table)
2640 return;
2641
2642 bnad->msix_table =
2643 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2644
2645 if (!bnad->msix_table)
2646 goto intx_mode;
2647
2648 for (i = 0; i < bnad->msix_num; i++)
2649 bnad->msix_table[i].entry = i;
2650
2651 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2652 1, bnad->msix_num);
2653 if (ret < 0) {
2654 goto intx_mode;
2655 } else if (ret < bnad->msix_num) {
2656 dev_warn(&bnad->pcidev->dev,
2657 "%d MSI-X vectors allocated < %d requested\n",
2658 ret, bnad->msix_num);
2659
2660 spin_lock_irqsave(&bnad->bna_lock, flags);
		/* ret is the number of MSI-X vectors actually allocated */
2662 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2663 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2664 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2665
2666 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2667 BNAD_MAILBOX_MSIX_VECTORS;
2668
2669 if (bnad->msix_num > ret) {
2670 pci_disable_msix(bnad->pcidev);
2671 goto intx_mode;
2672 }
2673 }
2674
2675 pci_intx(bnad->pcidev, 0);
2676
2677 return;
2678
2679intx_mode:
2680 dev_warn(&bnad->pcidev->dev,
2681 "MSI-X enable failed - operating in INTx mode\n");
2682
2683 kfree(bnad->msix_table);
2684 bnad->msix_table = NULL;
2685 bnad->msix_num = 0;
2686 spin_lock_irqsave(&bnad->bna_lock, flags);
2687 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2688 bnad_q_num_init(bnad);
2689 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2690}
2691
2692static void
2693bnad_disable_msix(struct bnad *bnad)
2694{
2695 u32 cfg_flags;
2696 unsigned long flags;
2697
2698 spin_lock_irqsave(&bnad->bna_lock, flags);
2699 cfg_flags = bnad->cfg_flags;
2700 if (bnad->cfg_flags & BNAD_CF_MSIX)
2701 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2702 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2703
2704 if (cfg_flags & BNAD_CF_MSIX) {
2705 pci_disable_msix(bnad->pcidev);
2706 kfree(bnad->msix_table);
2707 bnad->msix_table = NULL;
2708 }
2709}

/* Netdev entry points */
2712static int
2713bnad_open(struct net_device *netdev)
2714{
2715 int err;
2716 struct bnad *bnad = netdev_priv(netdev);
2717 struct bna_pause_config pause_config;
2718 unsigned long flags;
2719
2720 mutex_lock(&bnad->conf_mutex);

	/* Tx */
2723 err = bnad_setup_tx(bnad, 0);
2724 if (err)
2725 goto err_return;

	/* Rx */
2728 err = bnad_setup_rx(bnad, 0);
2729 if (err)
2730 goto cleanup_tx;

	/* Port */
2733 pause_config.tx_pause = 0;
2734 pause_config.rx_pause = 0;
2735
2736 spin_lock_irqsave(&bnad->bna_lock, flags);
2737 bna_enet_mtu_set(&bnad->bna.enet,
2738 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2739 bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2740 bna_enet_enable(&bnad->bna.enet);
2741 spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Enable broadcast */
2744 bnad_enable_default_bcast(bnad);

	/* Restore VLANs, if any */
2747 bnad_restore_vlans(bnad, 0);

	/* Set the UCAST address */
2750 spin_lock_irqsave(&bnad->bna_lock, flags);
2751 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2752 spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Start the stats timer */
2755 bnad_stats_timer_start(bnad);
2756
2757 mutex_unlock(&bnad->conf_mutex);
2758
2759 return 0;
2760
2761cleanup_tx:
2762 bnad_destroy_tx(bnad, 0);
2763
2764err_return:
2765 mutex_unlock(&bnad->conf_mutex);
2766 return err;
2767}
2768
2769static int
2770bnad_stop(struct net_device *netdev)
2771{
2772 struct bnad *bnad = netdev_priv(netdev);
2773 unsigned long flags;
2774
2775 mutex_lock(&bnad->conf_mutex);

	/* Stop the stats timer */
2778 bnad_stats_timer_stop(bnad);
2779
2780 init_completion(&bnad->bnad_completions.enet_comp);
2781
2782 spin_lock_irqsave(&bnad->bna_lock, flags);
2783 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2784 bnad_cb_enet_disabled);
2785 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2786
2787 wait_for_completion(&bnad->bnad_completions.enet_comp);
2788
2789 bnad_destroy_tx(bnad, 0);
2790 bnad_destroy_rx(bnad, 0);

	/* Synchronize mailbox IRQ */
2793 bnad_mbox_irq_sync(bnad);
2794
2795 mutex_unlock(&bnad->conf_mutex);
2796
2797 return 0;
2798}

/* TX */
/* Returns 0 for success */
2802static int
2803bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2804 struct sk_buff *skb, struct bna_txq_entry *txqent)
2805{
2806 u16 flags = 0;
2807 u32 gso_size;
2808 u16 vlan_tag = 0;
2809
2810 if (skb_vlan_tag_present(skb)) {
2811 vlan_tag = (u16)skb_vlan_tag_get(skb);
2812 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2813 }
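	/* In CEE mode, override the skb priority with the TxQ's configured priority */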
2814 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2815 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2816 | (vlan_tag & 0x1fff);
2817 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2818 }
2819 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2820
2821 if (skb_is_gso(skb)) {
2822 gso_size = skb_shinfo(skb)->gso_size;
2823 if (unlikely(gso_size > bnad->netdev->mtu)) {
2824 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2825 return -EINVAL;
2826 }
2827 if (unlikely((gso_size + skb_transport_offset(skb) +
2828 tcp_hdrlen(skb)) >= skb->len)) {
2829 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2830 txqent->hdr.wi.lso_mss = 0;
2831 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2832 } else {
2833 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2834 txqent->hdr.wi.lso_mss = htons(gso_size);
2835 }
2836
2837 if (bnad_tso_prepare(bnad, skb)) {
2838 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2839 return -EINVAL;
2840 }
2841
2842 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2843 txqent->hdr.wi.l4_hdr_size_n_offset =
2844 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2845 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2846 } else {
2847 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2848 txqent->hdr.wi.lso_mss = 0;
2849
2850 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2851 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2852 return -EINVAL;
2853 }
2854
2855 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2856 __be16 net_proto = vlan_get_protocol(skb);
2857 u8 proto = 0;
2858
2859 if (net_proto == htons(ETH_P_IP))
2860 proto = ip_hdr(skb)->protocol;
2861#ifdef NETIF_F_IPV6_CSUM
2862 else if (net_proto == htons(ETH_P_IPV6)) {
			/* nexthdr may not be TCP immediately */
2864 proto = ipv6_hdr(skb)->nexthdr;
2865 }
2866#endif
2867 if (proto == IPPROTO_TCP) {
2868 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2869 txqent->hdr.wi.l4_hdr_size_n_offset =
2870 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2871 (0, skb_transport_offset(skb)));
2872
2873 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2874
2875 if (unlikely(skb_headlen(skb) <
2876 skb_transport_offset(skb) +
2877 tcp_hdrlen(skb))) {
2878 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2879 return -EINVAL;
2880 }
2881 } else if (proto == IPPROTO_UDP) {
2882 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2883 txqent->hdr.wi.l4_hdr_size_n_offset =
2884 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2885 (0, skb_transport_offset(skb)));
2886
2887 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2888 if (unlikely(skb_headlen(skb) <
2889 skb_transport_offset(skb) +
2890 sizeof(struct udphdr))) {
2891 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2892 return -EINVAL;
2893 }
2894 } else {
				/* Unsupported L4 protocol for checksum offload */
2896 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2897 return -EINVAL;
2898 }
2899 } else
2900 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2901 }
2902
2903 txqent->hdr.wi.flags = htons(flags);
2904 txqent->hdr.wi.frame_length = htonl(skb->len);
2905
2906 return 0;
2907}

/*
 * bnad_start_xmit : Netdev entry point for Transmit
 *		     Called under lock held by net_layer
 */
2913static netdev_tx_t
2914bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2915{
2916 struct bnad *bnad = netdev_priv(netdev);
2917 u32 txq_id = 0;
2918 struct bna_tcb *tcb = NULL;
2919 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2920 u32 prod, q_depth, vect_id;
2921 u32 wis, vectors, len;
2922 int i;
2923 dma_addr_t dma_addr;
2924 struct bna_txq_entry *txqent;
2925
2926 len = skb_headlen(skb);

	/* Sanity checks for the skb */

2930 if (unlikely(skb->len <= ETH_HLEN)) {
2931 dev_kfree_skb_any(skb);
2932 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2933 return NETDEV_TX_OK;
2934 }
2935 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2936 dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2938 return NETDEV_TX_OK;
2939 }
2940 if (unlikely(len == 0)) {
2941 dev_kfree_skb_any(skb);
2942 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2943 return NETDEV_TX_OK;
2944 }
2945
2946 tcb = bnad->tx_info[0].tcb[txq_id];

	/*
	 * Takes care of the Tx that is scheduled between clearing the flag
	 * and the netif_tx_stop_all_queues() call.
	 */
2952 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2953 dev_kfree_skb_any(skb);
2954 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2955 return NETDEV_TX_OK;
2956 }
2957
2958 q_depth = tcb->q_depth;
2959 prod = tcb->producer_index;
2960 unmap_q = tcb->unmap_q;
2961
2962 vectors = 1 + skb_shinfo(skb)->nr_frags;
2963 wis = BNA_TXQ_WI_NEEDED(vectors);
2964
2965 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2966 dev_kfree_skb_any(skb);
2967 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2968 return NETDEV_TX_OK;
2969 }

	/* Check for available TxQ resources */
2972 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2973 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2974 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2975 u32 sent;
2976 sent = bnad_txcmpl_process(bnad, tcb);
2977 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2978 bna_ib_ack(tcb->i_dbell, sent);
2979 smp_mb__before_atomic();
2980 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2981 } else {
2982 netif_stop_queue(netdev);
2983 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2984 }
2985
2986 smp_mb();

		/*
		 * Check again to deal with the race between netif_stop_queue
		 * here and netif_wake_queue in the interrupt handler, which
		 * runs outside the netif tx lock.
		 */
2992 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2993 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2994 return NETDEV_TX_BUSY;
2995 } else {
2996 netif_wake_queue(netdev);
2997 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2998 }
2999 }
3000
3001 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3002 head_unmap = &unmap_q[prod];

	/* Program the opcode, flags, frame_len and num_vectors in the WI */
3005 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3006 dev_kfree_skb_any(skb);
3007 return NETDEV_TX_OK;
3008 }
3009 txqent->hdr.wi.reserved = 0;
3010 txqent->hdr.wi.num_vectors = vectors;
3011
3012 head_unmap->skb = skb;
3013 head_unmap->nvecs = 0;

	/* Program the vectors */
3016 unmap = head_unmap;
3017 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3018 len, DMA_TO_DEVICE);
3019 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3020 dev_kfree_skb_any(skb);
3021 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3022 return NETDEV_TX_OK;
3023 }
3024 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3025 txqent->vector[0].length = htons(len);
3026 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3027 head_unmap->nvecs++;
3028
3029 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3030 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3031 u32 size = skb_frag_size(frag);
3032
3033 if (unlikely(size == 0)) {
			/* Undo the changes starting at tcb->producer_index */
3035 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3036 tcb->producer_index);
3037 dev_kfree_skb_any(skb);
3038 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3039 return NETDEV_TX_OK;
3040 }
3041
3042 len += size;
3043
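		/*
		 * A Tx work item holds at most BFI_TX_MAX_VECTORS_PER_WI
		 * vectors; chain an extension work item when it fills up.
		 */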
3044 vect_id++;
3045 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3046 vect_id = 0;
3047 BNA_QE_INDX_INC(prod, q_depth);
3048 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3049 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3050 unmap = &unmap_q[prod];
3051 }
3052
3053 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3054 0, size, DMA_TO_DEVICE);
3055 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			/* Undo the changes starting at tcb->producer_index */
3057 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3058 tcb->producer_index);
3059 dev_kfree_skb_any(skb);
3060 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3061 return NETDEV_TX_OK;
3062 }
3063
3064 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3065 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3066 txqent->vector[vect_id].length = htons(size);
3067 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3068 dma_addr);
3069 head_unmap->nvecs++;
3070 }
3071
3072 if (unlikely(len != skb->len)) {
		/* Undo the changes starting at tcb->producer_index */
3074 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3075 dev_kfree_skb_any(skb);
3076 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3077 return NETDEV_TX_OK;
3078 }
3079
3080 BNA_QE_INDX_INC(prod, q_depth);
3081 tcb->producer_index = prod;
3082
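	/* Ensure the WI and producer index writes are visible before the doorbell */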
3083 wmb();
3084
3085 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3086 return NETDEV_TX_OK;
3087
3088 skb_tx_timestamp(skb);
3089
3090 bna_txq_prod_indx_doorbell(tcb);
3091
3092 return NETDEV_TX_OK;
3093}

/*
 * Uses the spin_lock to synchronize reading of the stats structures,
 * which are written by BNA under the same lock.
 */
3099static void
3100bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3101{
3102 struct bnad *bnad = netdev_priv(netdev);
3103 unsigned long flags;
3104
3105 spin_lock_irqsave(&bnad->bna_lock, flags);
3106
3107 bnad_netdev_qstats_fill(bnad, stats);
3108 bnad_netdev_hwstats_fill(bnad, stats);
3109
3110 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3111}
3112
3113static void
3114bnad_set_rx_ucast_fltr(struct bnad *bnad)
3115{
3116 struct net_device *netdev = bnad->netdev;
3117 int uc_count = netdev_uc_count(netdev);
3118 enum bna_cb_status ret;
3119 u8 *mac_list;
3120 struct netdev_hw_addr *ha;
3121 int entry;
3122
3123 if (netdev_uc_empty(bnad->netdev)) {
3124 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3125 return;
3126 }
3127
3128 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3129 goto mode_default;
3130
	mac_list = kcalloc(uc_count, ETH_ALEN, GFP_ATOMIC);
3132 if (mac_list == NULL)
3133 goto mode_default;
3134
3135 entry = 0;
3136 netdev_for_each_uc_addr(ha, netdev) {
3137 ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3138 entry++;
3139 }
3140
3141 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3142 kfree(mac_list);
3143
3144 if (ret != BNA_CB_SUCCESS)
3145 goto mode_default;
3146
3147 return;

	/* ucast packets not in the UCAM are routed to the default function */
3150mode_default:
3151 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3152 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3153}
3154
3155static void
3156bnad_set_rx_mcast_fltr(struct bnad *bnad)
3157{
3158 struct net_device *netdev = bnad->netdev;
3159 int mc_count = netdev_mc_count(netdev);
3160 enum bna_cb_status ret;
3161 u8 *mac_list;
3162
3163 if (netdev->flags & IFF_ALLMULTI)
3164 goto mode_allmulti;
3165
3166 if (netdev_mc_empty(netdev))
3167 return;
3168
3169 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3170 goto mode_allmulti;
3171
3172 mac_list = kcalloc(mc_count + 1, ETH_ALEN, GFP_ATOMIC);
3173
3174 if (mac_list == NULL)
3175 goto mode_allmulti;
3176
3177 ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);

	/* Copy the rest of the multicast addresses */
3180 bnad_netdev_mc_list_get(netdev, mac_list);
3181 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3182 kfree(mac_list);
3183
3184 if (ret != BNA_CB_SUCCESS)
3185 goto mode_allmulti;
3186
3187 return;
3188
3189mode_allmulti:
3190 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3191 bna_rx_mcast_delall(bnad->rx_info[0].rx);
3192}
3193
3194void
3195bnad_set_rx_mode(struct net_device *netdev)
3196{
3197 struct bnad *bnad = netdev_priv(netdev);
3198 enum bna_rxmode new_mode, mode_mask;
3199 unsigned long flags;
3200
3201 spin_lock_irqsave(&bnad->bna_lock, flags);
3202
3203 if (bnad->rx_info[0].rx == NULL) {
3204 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3205 return;
3206 }

	/* Clear bnad flags so they can be updated with the new settings */
3209 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3210 BNAD_CF_ALLMULTI);
3211
3212 new_mode = 0;
3213 if (netdev->flags & IFF_PROMISC) {
3214 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3215 bnad->cfg_flags |= BNAD_CF_PROMISC;
3216 } else {
3217 bnad_set_rx_mcast_fltr(bnad);
3218
3219 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3220 new_mode |= BNA_RXMODE_ALLMULTI;
3221
3222 bnad_set_rx_ucast_fltr(bnad);
3223
3224 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3225 new_mode |= BNA_RXMODE_DEFAULT;
3226 }
3227
3228 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3229 BNA_RXMODE_ALLMULTI;
3230 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3231
3232 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3233}

/*
 * bnad_set_mac_address()
 *	Entry point for the Set MAC Address
 *
 * 1. Sets the primary MAC
 */
3240static int
3241bnad_set_mac_address(struct net_device *netdev, void *addr)
3242{
3243 int err;
3244 struct bnad *bnad = netdev_priv(netdev);
3245 struct sockaddr *sa = (struct sockaddr *)addr;
3246 unsigned long flags;
3247
3248 spin_lock_irqsave(&bnad->bna_lock, flags);
3249
3250 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3251 if (!err)
3252 ether_addr_copy(netdev->dev_addr, sa->sa_data);
3253
3254 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3255
3256 return err;
3257}
3258
3259static int
3260bnad_mtu_set(struct bnad *bnad, int frame_size)
3261{
3262 unsigned long flags;
3263
3264 init_completion(&bnad->bnad_completions.mtu_comp);
3265
3266 spin_lock_irqsave(&bnad->bna_lock, flags);
3267 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3268 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3269
3270 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3271
3272 return bnad->bnad_completions.mtu_comp_status;
3273}
3274
3275static int
3276bnad_change_mtu(struct net_device *netdev, int new_mtu)
3277{
3278 int err, mtu;
3279 struct bnad *bnad = netdev_priv(netdev);
3280 u32 frame, new_frame;
3281
3282 mutex_lock(&bnad->conf_mutex);
3283
3284 mtu = netdev->mtu;
3285 netdev->mtu = new_mtu;
3286
3287 frame = BNAD_FRAME_SIZE(mtu);
3288 new_frame = BNAD_FRAME_SIZE(new_mtu);

	/* Check if multi-buffer Rx needs to be reconfigured */
3291 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3292 netif_running(bnad->netdev)) {
		/* Only when the transition crosses the 4K boundary */
3294 if ((frame <= 4096 && new_frame > 4096) ||
3295 (frame > 4096 && new_frame <= 4096))
3296 bnad_reinit_rx(bnad);
3297 }
3298
3299 err = bnad_mtu_set(bnad, new_frame);
3300 if (err)
3301 err = -EBUSY;
3302
3303 mutex_unlock(&bnad->conf_mutex);
3304 return err;
3305}
3306
3307static int
3308bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3309{
3310 struct bnad *bnad = netdev_priv(netdev);
3311 unsigned long flags;
3312
3313 if (!bnad->rx_info[0].rx)
3314 return 0;
3315
3316 mutex_lock(&bnad->conf_mutex);
3317
3318 spin_lock_irqsave(&bnad->bna_lock, flags);
3319 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3320 set_bit(vid, bnad->active_vlans);
3321 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3322
3323 mutex_unlock(&bnad->conf_mutex);
3324
3325 return 0;
3326}
3327
3328static int
3329bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3330{
3331 struct bnad *bnad = netdev_priv(netdev);
3332 unsigned long flags;
3333
3334 if (!bnad->rx_info[0].rx)
3335 return 0;
3336
3337 mutex_lock(&bnad->conf_mutex);
3338
3339 spin_lock_irqsave(&bnad->bna_lock, flags);
3340 clear_bit(vid, bnad->active_vlans);
3341 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3342 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3343
3344 mutex_unlock(&bnad->conf_mutex);
3345
3346 return 0;
3347}
3348
3349static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3350{
3351 struct bnad *bnad = netdev_priv(dev);
3352 netdev_features_t changed = features ^ dev->features;
3353
3354 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3355 unsigned long flags;
3356
3357 spin_lock_irqsave(&bnad->bna_lock, flags);
3358
3359 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3360 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3361 else
3362 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3363
3364 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3365 }
3366
3367 return 0;
3368}
3369
3370#ifdef CONFIG_NET_POLL_CONTROLLER
3371static void
3372bnad_netpoll(struct net_device *netdev)
3373{
3374 struct bnad *bnad = netdev_priv(netdev);
3375 struct bnad_rx_info *rx_info;
3376 struct bnad_rx_ctrl *rx_ctrl;
3377 u32 curr_mask;
3378 int i, j;
3379
3380 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3381 bna_intx_disable(&bnad->bna, curr_mask);
3382 bnad_isr(bnad->pcidev->irq, netdev);
3383 bna_intx_enable(&bnad->bna, curr_mask);
3384 } else {
		/*
		 * Tx processing may happen in sending context, so no need
		 * to explicitly process completions here.
		 *
		 * Rx processing
		 */
3391 for (i = 0; i < bnad->num_rx; i++) {
3392 rx_info = &bnad->rx_info[i];
3393 if (!rx_info->rx)
3394 continue;
3395 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3396 rx_ctrl = &rx_info->rx_ctrl[j];
3397 if (rx_ctrl->ccb)
3398 bnad_netif_rx_schedule_poll(bnad,
3399 rx_ctrl->ccb);
3400 }
3401 }
3402 }
3403}
3404#endif
3405
3406static const struct net_device_ops bnad_netdev_ops = {
3407 .ndo_open = bnad_open,
3408 .ndo_stop = bnad_stop,
3409 .ndo_start_xmit = bnad_start_xmit,
3410 .ndo_get_stats64 = bnad_get_stats64,
3411 .ndo_set_rx_mode = bnad_set_rx_mode,
3412 .ndo_validate_addr = eth_validate_addr,
3413 .ndo_set_mac_address = bnad_set_mac_address,
3414 .ndo_change_mtu = bnad_change_mtu,
3415 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3416 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3417 .ndo_set_features = bnad_set_features,
3418#ifdef CONFIG_NET_POLL_CONTROLLER
3419 .ndo_poll_controller = bnad_netpoll
3420#endif
3421};
3422
3423static void
3424bnad_netdev_init(struct bnad *bnad, bool using_dac)
3425{
3426 struct net_device *netdev = bnad->netdev;
3427
3428 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3429 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3430 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3431 NETIF_F_HW_VLAN_CTAG_RX;
3432
3433 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3434 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3435 NETIF_F_TSO | NETIF_F_TSO6;
3436
3437 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3438
3439 if (using_dac)
3440 netdev->features |= NETIF_F_HIGHDMA;
3441
3442 netdev->mem_start = bnad->mmio_start;
3443 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;

	/* MTU range: 46 - 9000 */
3446 netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
3447 netdev->max_mtu = BNAD_JUMBO_MTU;
3448
3449 netdev->netdev_ops = &bnad_netdev_ops;
3450 bnad_set_ethtool_ops(netdev);
3451}

/*
 * 1. Initialize the bnad structure
 * 2. Setup netdev pointer in pci_dev
 * 3. Initialize no. of TxQs & CQs & MSI-X vectors
 * 4. Initialize work queue
 */
3459static int
3460bnad_init(struct bnad *bnad,
3461 struct pci_dev *pdev, struct net_device *netdev)
3462{
3463 unsigned long flags;
3464
3465 SET_NETDEV_DEV(netdev, &pdev->dev);
3466 pci_set_drvdata(pdev, netdev);
3467
3468 bnad->netdev = netdev;
3469 bnad->pcidev = pdev;
3470 bnad->mmio_start = pci_resource_start(pdev, 0);
3471 bnad->mmio_len = pci_resource_len(pdev, 0);
3472 bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
3473 if (!bnad->bar0) {
3474 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3475 return -ENOMEM;
3476 }
3477 dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3478 (unsigned long long) bnad->mmio_len);
3479
3480 spin_lock_irqsave(&bnad->bna_lock, flags);
3481 if (!bnad_msix_disable)
3482 bnad->cfg_flags = BNAD_CF_MSIX;
3483
3484 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3485
3486 bnad_q_num_init(bnad);
3487 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3488
3489 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3490 (bnad->num_rx * bnad->num_rxp_per_rx) +
3491 BNAD_MAILBOX_MSIX_VECTORS;
3492
3493 bnad->txq_depth = BNAD_TXQ_DEPTH;
3494 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3495
3496 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3497 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3498
3499 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3500 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3501 if (!bnad->work_q) {
3502 iounmap(bnad->bar0);
3503 return -ENOMEM;
3504 }
3505
3506 return 0;
3507}

/* Tear down the work queue and unmap the BAR that bnad_init() set up */
3514static void
3515bnad_uninit(struct bnad *bnad)
3516{
3517 if (bnad->work_q) {
3518 flush_workqueue(bnad->work_q);
3519 destroy_workqueue(bnad->work_q);
3520 bnad->work_q = NULL;
3521 }
3522
3523 if (bnad->bar0)
3524 iounmap(bnad->bar0);
3525}

/*
 * Initialize locks
 *	a) Per-ioceth mutex used for serializing configuration
 *	   changes from the OS interface
 *	b) Spin lock used to protect the bna state machine
 */
3533static void
3534bnad_lock_init(struct bnad *bnad)
3535{
3536 spin_lock_init(&bnad->bna_lock);
3537 mutex_init(&bnad->conf_mutex);
3538}
3539
3540static void
3541bnad_lock_uninit(struct bnad *bnad)
3542{
3543 mutex_destroy(&bnad->conf_mutex);
3544}

/* PCI initialization */
3547static int
3548bnad_pci_init(struct bnad *bnad,
3549 struct pci_dev *pdev, bool *using_dac)
3550{
3551 int err;
3552
3553 err = pci_enable_device(pdev);
3554 if (err)
3555 return err;
3556 err = pci_request_regions(pdev, BNAD_NAME);
3557 if (err)
3558 goto disable_device;
3559 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3560 *using_dac = true;
3561 } else {
3562 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3563 if (err)
3564 goto release_regions;
3565 *using_dac = false;
3566 }
3567 pci_set_master(pdev);
3568 return 0;
3569
3570release_regions:
3571 pci_release_regions(pdev);
3572disable_device:
3573 pci_disable_device(pdev);
3574
3575 return err;
3576}
3577
3578static void
3579bnad_pci_uninit(struct pci_dev *pdev)
3580{
3581 pci_release_regions(pdev);
3582 pci_disable_device(pdev);
3583}
3584
3585static int
3586bnad_pci_probe(struct pci_dev *pdev,
3587 const struct pci_device_id *pcidev_id)
3588{
3589 bool using_dac;
3590 int err;
3591 struct bnad *bnad;
3592 struct bna *bna;
3593 struct net_device *netdev;
3594 struct bfa_pcidev pcidev_info;
3595 unsigned long flags;
3596
3597 mutex_lock(&bnad_fwimg_mutex);
3598 if (!cna_get_firmware_buf(pdev)) {
3599 mutex_unlock(&bnad_fwimg_mutex);
3600 dev_err(&pdev->dev, "failed to load firmware image!\n");
3601 return -ENODEV;
3602 }
3603 mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocate a net_device with a struct bnad as its private area;
	 * bnad = netdev_priv(netdev)
	 */
3609 netdev = alloc_etherdev(sizeof(struct bnad));
3610 if (!netdev) {
3611 err = -ENOMEM;
3612 return err;
3613 }
3614 bnad = netdev_priv(netdev);
3615 bnad_lock_init(bnad);
3616 bnad->id = atomic_inc_return(&bna_id) - 1;
3617
3618 mutex_lock(&bnad->conf_mutex);

	/*
	 * PCI initialization
	 *	Output : using_dac = true if 64-bit DMA
	 *		 = false if 32-bit DMA
	 */
3624 using_dac = false;
3625 err = bnad_pci_init(bnad, pdev, &using_dac);
3626 if (err)
3627 goto unlock_mutex;

	/*
	 * Initialize the bnad structure;
	 * set up the relation between pci_dev & netdev
	 */
3633 err = bnad_init(bnad, pdev, netdev);
3634 if (err)
3635 goto pci_uninit;

	/* Initialize netdev structure, set up ethtool ops */
3638 bnad_netdev_init(bnad, using_dac);

	/* Set link to down state */
3641 netif_carrier_off(netdev);

	/* Set up the debugfs node for this bnad */
3644 if (bna_debugfs_enable)
3645 bnad_debugfs_init(bnad);

	/* Get resource requirement from bna */
3648 spin_lock_irqsave(&bnad->bna_lock, flags);
3649 bna_res_req(&bnad->res_info[0]);
3650 spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Allocate resources from bna */
3653 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3654 if (err)
3655 goto drv_uninit;
3656
3657 bna = &bnad->bna;

	/* Set up pcidev_info for bna_init() */
3660 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3661 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3662 pcidev_info.device_id = bnad->pcidev->device;
3663 pcidev_info.pci_bar_kva = bnad->bar0;
3664
3665 spin_lock_irqsave(&bnad->bna_lock, flags);
3666 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3667 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3668
3669 bnad->stats.bna_stats = &bna->stats;
3670
3671 bnad_enable_msix(bnad);
3672 err = bnad_mbox_irq_alloc(bnad);
3673 if (err)
3674 goto res_free;

	/* Set up timers */
3677 timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
3678 timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
3679 timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
3680 timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3681 0);

	/*
	 * Start the chip.
	 * If the callback comes with an error, we bail out.
	 * This is a catastrophic error.
	 */
3688 err = bnad_ioceth_enable(bnad);
3689 if (err) {
3690 dev_err(&pdev->dev, "initialization failed err=%d\n", err);
3691 goto probe_success;
3692 }
3693
3694 spin_lock_irqsave(&bnad->bna_lock, flags);
3695 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3696 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3697 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3698 bna_attr(bna)->num_rxp - 1);
3699 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3700 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3701 err = -EIO;
3702 }
3703 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3704 if (err)
3705 goto disable_ioceth;
3706
3707 spin_lock_irqsave(&bnad->bna_lock, flags);
3708 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3709 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3710
3711 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3712 if (err) {
3713 err = -EIO;
3714 goto disable_ioceth;
3715 }
3716
3717 spin_lock_irqsave(&bnad->bna_lock, flags);
3718 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3719 spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Get the burnt-in MAC address */
3722 spin_lock_irqsave(&bnad->bna_lock, flags);
3723 bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3724 bnad_set_netdev_perm_addr(bnad);
3725 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3726
3727 mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with the net_device layer */
3730 err = register_netdev(netdev);
3731 if (err) {
3732 dev_err(&pdev->dev, "registering net device failed\n");
3733 goto probe_uninit;
3734 }
3735 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3736
3737 return 0;
3738
3739probe_success:
3740 mutex_unlock(&bnad->conf_mutex);
3741 return 0;
3742
3743probe_uninit:
3744 mutex_lock(&bnad->conf_mutex);
3745 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3746disable_ioceth:
3747 bnad_ioceth_disable(bnad);
3748 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3749 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3750 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3751 spin_lock_irqsave(&bnad->bna_lock, flags);
3752 bna_uninit(bna);
3753 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3754 bnad_mbox_irq_free(bnad);
3755 bnad_disable_msix(bnad);
3756res_free:
3757 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3758drv_uninit:
	/* Remove the debugfs node for this bnad */
3760 kfree(bnad->regdata);
3761 bnad_debugfs_uninit(bnad);
3762 bnad_uninit(bnad);
3763pci_uninit:
3764 bnad_pci_uninit(pdev);
3765unlock_mutex:
3766 mutex_unlock(&bnad->conf_mutex);
3767 bnad_lock_uninit(bnad);
3768 free_netdev(netdev);
3769 return err;
3770}
3771
3772static void
3773bnad_pci_remove(struct pci_dev *pdev)
3774{
3775 struct net_device *netdev = pci_get_drvdata(pdev);
3776 struct bnad *bnad;
3777 struct bna *bna;
3778 unsigned long flags;
3779
3780 if (!netdev)
3781 return;
3782
3783 bnad = netdev_priv(netdev);
3784 bna = &bnad->bna;
3785
3786 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3787 unregister_netdev(netdev);
3788
3789 mutex_lock(&bnad->conf_mutex);
3790 bnad_ioceth_disable(bnad);
3791 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3792 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3793 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3794 spin_lock_irqsave(&bnad->bna_lock, flags);
3795 bna_uninit(bna);
3796 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3797
3798 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3799 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3800 bnad_mbox_irq_free(bnad);
3801 bnad_disable_msix(bnad);
3802 bnad_pci_uninit(pdev);
3803 mutex_unlock(&bnad->conf_mutex);
3804 bnad_lock_uninit(bnad);
3805
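	/* Remove the debugfs node for this bnad */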
3806 kfree(bnad->regdata);
3807 bnad_debugfs_uninit(bnad);
3808 bnad_uninit(bnad);
3809 free_netdev(netdev);
3810}
3811
3812static const struct pci_device_id bnad_pci_id_table[] = {
3813 {
3814 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3815 PCI_DEVICE_ID_BROCADE_CT),
3816 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3817 .class_mask = 0xffff00
3818 },
3819 {
3820 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3821 BFA_PCI_DEVICE_ID_CT2),
3822 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3823 .class_mask = 0xffff00
3824 },
3825 {0, },
3826};
3827
3828MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3829
3830static struct pci_driver bnad_pci_driver = {
3831 .name = BNAD_NAME,
3832 .id_table = bnad_pci_id_table,
3833 .probe = bnad_pci_probe,
3834 .remove = bnad_pci_remove,
3835};
3836
3837static int __init
3838bnad_module_init(void)
3839{
3840 int err;
3841
3842 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3843
3844 err = pci_register_driver(&bnad_pci_driver);
3845 if (err < 0) {
3846 pr_err("bna: PCI driver registration failed err=%d\n", err);
3847 return err;
3848 }
3849
3850 return 0;
3851}
3852
3853static void __exit
3854bnad_module_exit(void)
3855{
3856 pci_unregister_driver(&bnad_pci_driver);
3857 release_firmware(bfi_fw);
3858}
3859
3860module_init(bnad_module_init);
3861module_exit(bnad_module_exit);
3862
3863MODULE_AUTHOR("Brocade");
3864MODULE_LICENSE("GPL");
3865MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3866MODULE_FIRMWARE(CNA_FW_FILE_CT);
3867MODULE_FIRMWARE(CNA_FW_FILE_CT2);
3868