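/* Gianfar Ethernet Driver
 *
 * Driver for the Freescale TSEC/eTSEC Gigabit Ethernet controllers.
 */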
60#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
61
62#include <linux/kernel.h>
63#include <linux/string.h>
64#include <linux/errno.h>
65#include <linux/unistd.h>
66#include <linux/slab.h>
67#include <linux/interrupt.h>
68#include <linux/delay.h>
69#include <linux/netdevice.h>
70#include <linux/etherdevice.h>
71#include <linux/skbuff.h>
72#include <linux/if_vlan.h>
73#include <linux/spinlock.h>
74#include <linux/mm.h>
75#include <linux/of_address.h>
76#include <linux/of_irq.h>
77#include <linux/of_mdio.h>
78#include <linux/of_platform.h>
79#include <linux/ip.h>
80#include <linux/tcp.h>
81#include <linux/udp.h>
82#include <linux/in.h>
83#include <linux/net_tstamp.h>
84
85#include <asm/io.h>
86#ifdef CONFIG_PPC
87#include <asm/reg.h>
88#include <asm/mpc85xx.h>
89#endif
90#include <asm/irq.h>
91#include <linux/uaccess.h>
92#include <linux/module.h>
93#include <linux/dma-mapping.h>
94#include <linux/crc32.h>
95#include <linux/mii.h>
96#include <linux/phy.h>
97#include <linux/phy_fixed.h>
98#include <linux/of.h>
99#include <linux/of_net.h>
100
101#include "gianfar.h"
102
103#define TX_TIMEOUT (5*HZ)
104
105MODULE_AUTHOR("Freescale Semiconductor, Inc");
106MODULE_DESCRIPTION("Gianfar Ethernet Driver");
107MODULE_LICENSE("GPL");
108
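/* Initialize a single Rx buffer descriptor: program the buffer address,
 * hand the BD back to hardware (EMPTY | INTERRUPT), and set the WRAP flag
 * on the last descriptor so the controller loops back to the ring base.
 */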
109static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
110 dma_addr_t buf)
111{
112 u32 lstatus;
113
114 bdp->bufPtr = cpu_to_be32(buf);
115
116 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
117 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
118 lstatus |= BD_LFLAG(RXBD_WRAP);
119
120 gfar_wmb();
121
122 bdp->lstatus = cpu_to_be32(lstatus);
123}
124
125static void gfar_init_tx_rx_base(struct gfar_private *priv)
126{
127 struct gfar __iomem *regs = priv->gfargrp[0].regs;
128 u32 __iomem *baddr;
129 int i;
130
	baddr = &regs->tbase0;
132 for (i = 0; i < priv->num_tx_queues; i++) {
133 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
134 baddr += 2;
135 }
136
	baddr = &regs->rbase0;
138 for (i = 0; i < priv->num_rx_queues; i++) {
139 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
140 baddr += 2;
141 }
142}
143
144static void gfar_init_rqprm(struct gfar_private *priv)
145{
146 struct gfar __iomem *regs = priv->gfargrp[0].regs;
147 u32 __iomem *baddr;
148 int i;
149
	baddr = &regs->rqprm0;
151 for (i = 0; i < priv->num_rx_queues; i++) {
152 gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
153 (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
154 baddr++;
155 }
156}
157
158static void gfar_rx_offload_en(struct gfar_private *priv)
159{
160
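	/* The hardware prepends an Rx frame control block (FCB) only when at
	 * least one of the Rx offloads below is in use.
	 */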
161 priv->uses_rxfcb = 0;
162
163 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
164 priv->uses_rxfcb = 1;
165
166 if (priv->hwts_rx_en || priv->rx_filer_enable)
167 priv->uses_rxfcb = 1;
168}
169
170static void gfar_mac_rx_config(struct gfar_private *priv)
171{
172 struct gfar __iomem *regs = priv->gfargrp[0].regs;
173 u32 rctrl = 0;
174
175 if (priv->rx_filer_enable) {
176 rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
177
		gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
179 }
180
181
182 if (priv->ndev->flags & IFF_PROMISC)
183 rctrl |= RCTRL_PROM;
184
185 if (priv->ndev->features & NETIF_F_RXCSUM)
186 rctrl |= RCTRL_CHECKSUMMING;
187
188 if (priv->extended_hash)
189 rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
190
191 if (priv->padding) {
192 rctrl &= ~RCTRL_PAL_MASK;
193 rctrl |= RCTRL_PADDING(priv->padding);
194 }
195
196
197 if (priv->hwts_rx_en)
198 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
199
200 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
201 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
202
203
	gfar_write(&regs->rctrl, rctrl);

	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	gfar_write(&regs->rctrl, rctrl);
212}
213
214static void gfar_mac_tx_config(struct gfar_private *priv)
215{
216 struct gfar __iomem *regs = priv->gfargrp[0].regs;
217 u32 tctrl = 0;
218
219 if (priv->ndev->features & NETIF_F_IP_CSUM)
220 tctrl |= TCTRL_INIT_CSUM;
221
222 if (priv->prio_sched_en)
223 tctrl |= TCTRL_TXSCHED_PRIO;
224 else {
225 tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
228 }
229
230 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
231 tctrl |= TCTRL_VLINS;
232
	gfar_write(&regs->tctrl, tctrl);
234}
235
236static void gfar_configure_coalescing(struct gfar_private *priv,
237 unsigned long tx_mask, unsigned long rx_mask)
238{
239 struct gfar __iomem *regs = priv->gfargrp[0].regs;
240 u32 __iomem *baddr;
241
242 if (priv->mode == MQ_MG_MODE) {
243 int i = 0;
244
		baddr = &regs->txic0;
246 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
247 gfar_write(baddr + i, 0);
248 if (likely(priv->tx_queue[i]->txcoalescing))
249 gfar_write(baddr + i, priv->tx_queue[i]->txic);
250 }
251
		baddr = &regs->rxic0;
253 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
254 gfar_write(baddr + i, 0);
255 if (likely(priv->rx_queue[i]->rxcoalescing))
256 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
257 }
258 } else {
259
260
261
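		/* Single interrupt group: only one pair of coalescing
		 * registers to program, shared by all queues.
		 */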
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
269 }
270}
271
272static void gfar_configure_coalescing_all(struct gfar_private *priv)
273{
274 gfar_configure_coalescing(priv, 0xFF, 0xFF);
275}
276
277static struct net_device_stats *gfar_get_stats(struct net_device *dev)
278{
279 struct gfar_private *priv = netdev_priv(dev);
280 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
281 unsigned long tx_packets = 0, tx_bytes = 0;
282 int i;
283
284 for (i = 0; i < priv->num_rx_queues; i++) {
285 rx_packets += priv->rx_queue[i]->stats.rx_packets;
286 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
287 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
288 }
289
290 dev->stats.rx_packets = rx_packets;
291 dev->stats.rx_bytes = rx_bytes;
292 dev->stats.rx_dropped = rx_dropped;
293
294 for (i = 0; i < priv->num_tx_queues; i++) {
295 tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
296 tx_packets += priv->tx_queue[i]->stats.tx_packets;
297 }
298
299 dev->stats.tx_bytes = tx_bytes;
300 dev->stats.tx_packets = tx_packets;
301
302 return &dev->stats;
303}
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
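/* Set the appropriate hash bit for the given address: the top bits of the
 * Ethernet CRC of the address select which group-address hash register to
 * use and which bit within it to set.
 */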
319static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
320{
321 u32 tempval;
322 struct gfar_private *priv = netdev_priv(dev);
323 u32 result = ether_crc(ETH_ALEN, addr);
324 int width = priv->hash_width;
325 u8 whichbit = (result >> (32 - width)) & 0x1f;
326 u8 whichreg = result >> (32 - width + 5);
327 u32 value = (1 << (31-whichbit));
328
329 tempval = gfar_read(priv->hash_regs[whichreg]);
330 tempval |= value;
331 gfar_write(priv->hash_regs[whichreg], tempval);
332}
333
334
335
336
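/* Program one of the exact-match MAC address registers. The hardware expects
 * the address in byte-reversed order, split across two registers.
 */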
337static void gfar_set_mac_for_addr(struct net_device *dev, int num,
338 const u8 *addr)
339{
340 struct gfar_private *priv = netdev_priv(dev);
341 struct gfar __iomem *regs = priv->gfargrp[0].regs;
342 u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;
344
345 macptr += num*2;
346
347
348
349
350
351 tempval = (addr[5] << 24) | (addr[4] << 16) |
352 (addr[3] << 8) | addr[2];
353
354 gfar_write(macptr, tempval);
355
356 tempval = (addr[1] << 24) | (addr[0] << 16);
357
358 gfar_write(macptr+1, tempval);
359}
360
361static int gfar_set_mac_addr(struct net_device *dev, void *p)
362{
363 int ret;
364
365 ret = eth_mac_addr(dev, p);
366 if (ret)
367 return ret;
368
369 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
370
371 return 0;
372}
373
374static void gfar_ints_disable(struct gfar_private *priv)
375{
376 int i;
377 for (i = 0; i < priv->num_grps; i++) {
378 struct gfar __iomem *regs = priv->gfargrp[i].regs;
379
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
384 }
385}
386
387static void gfar_ints_enable(struct gfar_private *priv)
388{
389 int i;
390 for (i = 0; i < priv->num_grps; i++) {
391 struct gfar __iomem *regs = priv->gfargrp[i].regs;
392
		gfar_write(&regs->imask, IMASK_DEFAULT);
394 }
395}
396
397static int gfar_alloc_tx_queues(struct gfar_private *priv)
398{
399 int i;
400
401 for (i = 0; i < priv->num_tx_queues; i++) {
402 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
403 GFP_KERNEL);
404 if (!priv->tx_queue[i])
405 return -ENOMEM;
406
407 priv->tx_queue[i]->tx_skbuff = NULL;
408 priv->tx_queue[i]->qindex = i;
409 priv->tx_queue[i]->dev = priv->ndev;
410 spin_lock_init(&(priv->tx_queue[i]->txlock));
411 }
412 return 0;
413}
414
415static int gfar_alloc_rx_queues(struct gfar_private *priv)
416{
417 int i;
418
419 for (i = 0; i < priv->num_rx_queues; i++) {
420 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
421 GFP_KERNEL);
422 if (!priv->rx_queue[i])
423 return -ENOMEM;
424
425 priv->rx_queue[i]->qindex = i;
426 priv->rx_queue[i]->ndev = priv->ndev;
427 }
428 return 0;
429}
430
431static void gfar_free_tx_queues(struct gfar_private *priv)
432{
433 int i;
434
435 for (i = 0; i < priv->num_tx_queues; i++)
436 kfree(priv->tx_queue[i]);
437}
438
439static void gfar_free_rx_queues(struct gfar_private *priv)
440{
441 int i;
442
443 for (i = 0; i < priv->num_rx_queues; i++)
444 kfree(priv->rx_queue[i]);
445}
446
447static void unmap_group_regs(struct gfar_private *priv)
448{
449 int i;
450
451 for (i = 0; i < MAXGROUPS; i++)
452 if (priv->gfargrp[i].regs)
453 iounmap(priv->gfargrp[i].regs);
454}
455
456static void free_gfar_dev(struct gfar_private *priv)
457{
458 int i, j;
459
460 for (i = 0; i < priv->num_grps; i++)
461 for (j = 0; j < GFAR_NUM_IRQS; j++) {
462 kfree(priv->gfargrp[i].irqinfo[j]);
463 priv->gfargrp[i].irqinfo[j] = NULL;
464 }
465
466 free_netdev(priv->ndev);
467}
468
469static void disable_napi(struct gfar_private *priv)
470{
471 int i;
472
473 for (i = 0; i < priv->num_grps; i++) {
474 napi_disable(&priv->gfargrp[i].napi_rx);
475 napi_disable(&priv->gfargrp[i].napi_tx);
476 }
477}
478
479static void enable_napi(struct gfar_private *priv)
480{
481 int i;
482
483 for (i = 0; i < priv->num_grps; i++) {
484 napi_enable(&priv->gfargrp[i].napi_rx);
485 napi_enable(&priv->gfargrp[i].napi_tx);
486 }
487}
488
489static int gfar_parse_group(struct device_node *np,
490 struct gfar_private *priv, const char *model)
491{
492 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
493 int i;
494
495 for (i = 0; i < GFAR_NUM_IRQS; i++) {
496 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
497 GFP_KERNEL);
498 if (!grp->irqinfo[i])
499 return -ENOMEM;
500 }
501
502 grp->regs = of_iomap(np, 0);
503 if (!grp->regs)
504 return -ENOMEM;
505
506 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
507
508
509 if (model && strcasecmp(model, "FEC")) {
510 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
511 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
512 if (!gfar_irq(grp, TX)->irq ||
513 !gfar_irq(grp, RX)->irq ||
514 !gfar_irq(grp, ER)->irq)
515 return -EINVAL;
516 }
517
518 grp->priv = priv;
519 spin_lock_init(&grp->grplock);
520 if (priv->mode == MQ_MG_MODE) {
521
522 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
523 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
524 } else {
525 grp->rx_bit_map = 0xFF;
526 grp->tx_bit_map = 0xFF;
527 }
528
529
530
531
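	/* The queue bit maps are built MSB-first; reverse them here so that
	 * bit 0 of the stored map corresponds to queue 0 when iterated with
	 * for_each_set_bit() below.
	 */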
532 grp->rx_bit_map = bitrev8(grp->rx_bit_map);
533 grp->tx_bit_map = bitrev8(grp->tx_bit_map);
534
535
536
537
538 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
539 if (!grp->rx_queue)
540 grp->rx_queue = priv->rx_queue[i];
541 grp->num_rx_queues++;
542 grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
543 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
544 priv->rx_queue[i]->grp = grp;
545 }
546
547 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
548 if (!grp->tx_queue)
549 grp->tx_queue = priv->tx_queue[i];
550 grp->num_tx_queues++;
551 grp->tstat |= (TSTAT_CLEAR_THALT >> i);
552 priv->tqueue |= (TQUEUE_EN0 >> i);
553 priv->tx_queue[i]->grp = grp;
554 }
555
556 priv->num_grps++;
557
558 return 0;
559}
560
561static int gfar_of_group_count(struct device_node *np)
562{
563 struct device_node *child;
564 int num = 0;
565
566 for_each_available_child_of_node(np, child)
567 if (of_node_name_eq(child, "queue-group"))
568 num++;
569
570 return num;
571}
572
573
574
575
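/* Infer the PHY interface mode from the hardware's ECNTRL register; used as
 * a fallback when the device tree does not provide a phy connection type.
 */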
576static phy_interface_t gfar_get_interface(struct net_device *dev)
577{
578 struct gfar_private *priv = netdev_priv(dev);
579 struct gfar __iomem *regs = priv->gfargrp[0].regs;
580 u32 ecntrl;
581
	ecntrl = gfar_read(&regs->ecntrl);
583
584 if (ecntrl & ECNTRL_SGMII_MODE)
585 return PHY_INTERFACE_MODE_SGMII;
586
587 if (ecntrl & ECNTRL_TBI_MODE) {
588 if (ecntrl & ECNTRL_REDUCED_MODE)
589 return PHY_INTERFACE_MODE_RTBI;
590 else
591 return PHY_INTERFACE_MODE_TBI;
592 }
593
594 if (ecntrl & ECNTRL_REDUCED_MODE) {
595 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
596 return PHY_INTERFACE_MODE_RMII;
597 }
598 else {
599 phy_interface_t interface = priv->interface;
600
601
602
603
604 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
605 return PHY_INTERFACE_MODE_RGMII_ID;
606
607 return PHY_INTERFACE_MODE_RGMII;
608 }
609 }
610
611 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
612 return PHY_INTERFACE_MODE_GMII;
613
614 return PHY_INTERFACE_MODE_MII;
615}
616
617static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
618{
619 const char *model;
620 int err = 0, i;
621 phy_interface_t interface;
622 struct net_device *dev = NULL;
623 struct gfar_private *priv = NULL;
624 struct device_node *np = ofdev->dev.of_node;
625 struct device_node *child = NULL;
626 u32 stash_len = 0;
627 u32 stash_idx = 0;
628 unsigned int num_tx_qs, num_rx_qs;
629 unsigned short mode;
630
631 if (!np)
632 return -ENODEV;
633
634 if (of_device_is_compatible(np, "fsl,etsec2"))
635 mode = MQ_MG_MODE;
636 else
637 mode = SQ_SG_MODE;
638
639 if (mode == SQ_SG_MODE) {
640 num_tx_qs = 1;
641 num_rx_qs = 1;
642 } else {
643
644 unsigned int num_grps = gfar_of_group_count(np);
645
646 if (num_grps == 0 || num_grps > MAXGROUPS) {
647 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
648 num_grps);
649 pr_err("Cannot do alloc_etherdev, aborting\n");
650 return -EINVAL;
651 }
652
653 num_tx_qs = num_grps;
654 num_rx_qs = num_grps;
655 }
656
657 if (num_tx_qs > MAX_TX_QS) {
658 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
659 num_tx_qs, MAX_TX_QS);
660 pr_err("Cannot do alloc_etherdev, aborting\n");
661 return -EINVAL;
662 }
663
664 if (num_rx_qs > MAX_RX_QS) {
665 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
666 num_rx_qs, MAX_RX_QS);
667 pr_err("Cannot do alloc_etherdev, aborting\n");
668 return -EINVAL;
669 }
670
671 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
672 dev = *pdev;
673 if (NULL == dev)
674 return -ENOMEM;
675
676 priv = netdev_priv(dev);
677 priv->ndev = dev;
678
679 priv->mode = mode;
680
681 priv->num_tx_queues = num_tx_qs;
682 netif_set_real_num_rx_queues(dev, num_rx_qs);
683 priv->num_rx_queues = num_rx_qs;
684
685 err = gfar_alloc_tx_queues(priv);
686 if (err)
687 goto tx_alloc_failed;
688
689 err = gfar_alloc_rx_queues(priv);
690 if (err)
691 goto rx_alloc_failed;
692
693 err = of_property_read_string(np, "model", &model);
694 if (err) {
695 pr_err("Device model property missing, aborting\n");
696 goto rx_alloc_failed;
697 }
698
699
700 INIT_LIST_HEAD(&priv->rx_list.list);
701 priv->rx_list.count = 0;
702 mutex_init(&priv->rx_queue_access);
703
704 for (i = 0; i < MAXGROUPS; i++)
705 priv->gfargrp[i].regs = NULL;
706
707
708 if (priv->mode == MQ_MG_MODE) {
709 for_each_available_child_of_node(np, child) {
710 if (!of_node_name_eq(child, "queue-group"))
711 continue;
712
713 err = gfar_parse_group(child, priv, model);
714 if (err) {
715 of_node_put(child);
716 goto err_grp_init;
717 }
718 }
719 } else {
720 err = gfar_parse_group(np, priv, model);
721 if (err)
722 goto err_grp_init;
723 }
724
725 if (of_property_read_bool(np, "bd-stash")) {
726 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
727 priv->bd_stash_en = 1;
728 }
729
730 err = of_property_read_u32(np, "rx-stash-len", &stash_len);
731
732 if (err == 0)
733 priv->rx_stash_size = stash_len;
734
735 err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
736
737 if (err == 0)
738 priv->rx_stash_index = stash_idx;
739
740 if (stash_len || stash_idx)
741 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
742
743 err = of_get_mac_address(np, dev->dev_addr);
744 if (err) {
745 eth_hw_addr_random(dev);
746 dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
747 }
748
749 if (model && !strcasecmp(model, "TSEC"))
750 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
751 FSL_GIANFAR_DEV_HAS_COALESCE |
752 FSL_GIANFAR_DEV_HAS_RMON |
753 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
754
755 if (model && !strcasecmp(model, "eTSEC"))
756 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
757 FSL_GIANFAR_DEV_HAS_COALESCE |
758 FSL_GIANFAR_DEV_HAS_RMON |
759 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
760 FSL_GIANFAR_DEV_HAS_CSUM |
761 FSL_GIANFAR_DEV_HAS_VLAN |
762 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
763 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
764 FSL_GIANFAR_DEV_HAS_TIMER |
765 FSL_GIANFAR_DEV_HAS_RX_FILER;
766
767
768
769
770
771 err = of_get_phy_mode(np, &interface);
772 if (!err)
773 priv->interface = interface;
774 else
775 priv->interface = gfar_get_interface(dev);
776
777 if (of_find_property(np, "fsl,magic-packet", NULL))
778 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
779
780 if (of_get_property(np, "fsl,wake-on-filer", NULL))
781 priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
782
783 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
784
785
786
787
788 if (!priv->phy_node && of_phy_is_fixed_link(np)) {
789 err = of_phy_register_fixed_link(np);
790 if (err)
791 goto err_grp_init;
792
793 priv->phy_node = of_node_get(np);
794 }
795
796
797 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
798
799 return 0;
800
801err_grp_init:
802 unmap_group_regs(priv);
803rx_alloc_failed:
804 gfar_free_rx_queues(priv);
805tx_alloc_failed:
806 gfar_free_tx_queues(priv);
807 free_gfar_dev(priv);
808 return err;
809}
810
811static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
812 u32 class)
813{
814 u32 rqfpr = FPR_FILER_MASK;
815 u32 rqfcr = 0x0;
816
817 rqfar--;
818 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
819 priv->ftp_rqfpr[rqfar] = rqfpr;
820 priv->ftp_rqfcr[rqfar] = rqfcr;
821 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
822
823 rqfar--;
824 rqfcr = RQFCR_CMP_NOMATCH;
825 priv->ftp_rqfpr[rqfar] = rqfpr;
826 priv->ftp_rqfcr[rqfar] = rqfcr;
827 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
828
829 rqfar--;
830 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
831 rqfpr = class;
832 priv->ftp_rqfcr[rqfar] = rqfcr;
833 priv->ftp_rqfpr[rqfar] = rqfpr;
834 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
835
836 rqfar--;
837 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
838 rqfpr = class;
839 priv->ftp_rqfcr[rqfar] = rqfcr;
840 priv->ftp_rqfpr[rqfar] = rqfpr;
841 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
842
843 return rqfar;
844}
845
846static void gfar_init_filer_table(struct gfar_private *priv)
847{
848 int i = 0x0;
849 u32 rqfar = MAX_FILER_IDX;
850 u32 rqfcr = 0x0;
851 u32 rqfpr = FPR_FILER_MASK;
852
853
854 rqfcr = RQFCR_CMP_MATCH;
855 priv->ftp_rqfcr[rqfar] = rqfcr;
856 priv->ftp_rqfpr[rqfar] = rqfpr;
857 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
858
859 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
860 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
861 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
862 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
863 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
864 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
865
866
867 priv->cur_filer_idx = rqfar;
868
869
870 rqfcr = RQFCR_CMP_NOMATCH;
871 for (i = 0; i < rqfar; i++) {
872 priv->ftp_rqfcr[i] = rqfcr;
873 priv->ftp_rqfpr[i] = rqfpr;
874 gfar_write_filer(priv, i, rqfcr, rqfpr);
875 }
876}
877
878#ifdef CONFIG_PPC
879static void __gfar_detect_errata_83xx(struct gfar_private *priv)
880{
881 unsigned int pvr = mfspr(SPRN_PVR);
882 unsigned int svr = mfspr(SPRN_SVR);
883 unsigned int mod = (svr >> 16) & 0xfff6;
884 unsigned int rev = svr & 0xffff;
885
886
887 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
888 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
889 priv->errata |= GFAR_ERRATA_74;
890
891
892 if ((pvr == 0x80850010 && mod == 0x80b0) ||
893 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
894 priv->errata |= GFAR_ERRATA_76;
895
896
897 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
898 priv->errata |= GFAR_ERRATA_12;
899}
900
901static void __gfar_detect_errata_85xx(struct gfar_private *priv)
902{
903 unsigned int svr = mfspr(SPRN_SVR);
904
905 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
906 priv->errata |= GFAR_ERRATA_12;
907
908 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
909 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
910 ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
911 priv->errata |= GFAR_ERRATA_76;
912}
913#endif
914
915static void gfar_detect_errata(struct gfar_private *priv)
916{
917 struct device *dev = &priv->ofdev->dev;
918
919
920 priv->errata |= GFAR_ERRATA_A002;
921
922#ifdef CONFIG_PPC
923 if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
924 __gfar_detect_errata_85xx(priv);
925 else
926 __gfar_detect_errata_83xx(priv);
927#endif
928
929 if (priv->errata)
930 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
931 priv->errata);
932}
933
934static void gfar_init_addr_hash_table(struct gfar_private *priv)
935{
936 struct gfar __iomem *regs = priv->gfargrp[0].regs;
937
938 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
939 priv->extended_hash = 1;
940 priv->hash_width = 9;
941
		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
958
959 } else {
960 priv->extended_hash = 0;
961 priv->hash_width = 8;
962
		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
971 }
972}
973
974static int __gfar_is_rx_idle(struct gfar_private *priv)
975{
976 u32 res;
977
978
979
980
981 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
982 return 0;
983
984
985
986
987
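	/* Read the eTSEC register at offset 0xd1c; if the two halves of the
	 * masked value match, the Rx path is assumed to be idle.
	 */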
988 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
989 res &= 0x7f807f80;
990 if ((res & 0xffff) == (res >> 16))
991 return 1;
992
993 return 0;
994}
995
996
997static void gfar_halt_nodisable(struct gfar_private *priv)
998{
999 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1000 u32 tempval;
1001 unsigned int timeout;
1002 int stopped;
1003
1004 gfar_ints_disable(priv);
1005
1006 if (gfar_is_dma_stopped(priv))
1007 return;
1008
1009
	tempval = gfar_read(&regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);
1013
1014retry:
1015 timeout = 1000;
1016 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1017 cpu_relax();
1018 timeout--;
1019 }
1020
1021 if (!timeout)
1022 stopped = gfar_is_dma_stopped(priv);
1023
1024 if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1025 !__gfar_is_rx_idle(priv))
1026 goto retry;
1027}
1028
1029
1030static void gfar_halt(struct gfar_private *priv)
1031{
1032 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1033 u32 tempval;
1034
1035
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);
1038
1039 mdelay(10);
1040
1041 gfar_halt_nodisable(priv);
1042
1043
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
1047}
1048
1049static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1050{
1051 struct txbd8 *txbdp;
1052 struct gfar_private *priv = netdev_priv(tx_queue->dev);
1053 int i, j;
1054
1055 txbdp = tx_queue->tx_bd_base;
1056
1057 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1058 if (!tx_queue->tx_skbuff[i])
1059 continue;
1060
1061 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1062 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
1063 txbdp->lstatus = 0;
1064 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1065 j++) {
1066 txbdp++;
1067 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1068 be16_to_cpu(txbdp->length),
1069 DMA_TO_DEVICE);
1070 }
1071 txbdp++;
1072 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1073 tx_queue->tx_skbuff[i] = NULL;
1074 }
1075 kfree(tx_queue->tx_skbuff);
1076 tx_queue->tx_skbuff = NULL;
1077}
1078
1079static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1080{
1081 int i;
1082
1083 struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
1084
1085 dev_kfree_skb(rx_queue->skb);
1086
1087 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1088 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
1089
1090 rxbdp->lstatus = 0;
1091 rxbdp->bufPtr = 0;
1092 rxbdp++;
1093
1094 if (!rxb->page)
1095 continue;
1096
1097 dma_unmap_page(rx_queue->dev, rxb->dma,
1098 PAGE_SIZE, DMA_FROM_DEVICE);
1099 __free_page(rxb->page);
1100
1101 rxb->page = NULL;
1102 }
1103
1104 kfree(rx_queue->rx_buff);
1105 rx_queue->rx_buff = NULL;
1106}
1107
1108
1109
1110
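/* Free all queued skbs, the per-queue buffer bookkeeping, and the coherent
 * DMA region holding the Tx and Rx buffer descriptor rings.
 */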
1111static void free_skb_resources(struct gfar_private *priv)
1112{
1113 struct gfar_priv_tx_q *tx_queue = NULL;
1114 struct gfar_priv_rx_q *rx_queue = NULL;
1115 int i;
1116
1117
1118 for (i = 0; i < priv->num_tx_queues; i++) {
1119 struct netdev_queue *txq;
1120
1121 tx_queue = priv->tx_queue[i];
1122 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1123 if (tx_queue->tx_skbuff)
1124 free_skb_tx_queue(tx_queue);
1125 netdev_tx_reset_queue(txq);
1126 }
1127
1128 for (i = 0; i < priv->num_rx_queues; i++) {
1129 rx_queue = priv->rx_queue[i];
1130 if (rx_queue->rx_buff)
1131 free_skb_rx_queue(rx_queue);
1132 }
1133
1134 dma_free_coherent(priv->dev,
1135 sizeof(struct txbd8) * priv->total_tx_ring_size +
1136 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1137 priv->tx_queue[0]->tx_bd_base,
1138 priv->tx_queue[0]->tx_bd_dma_base);
1139}
1140
1141void stop_gfar(struct net_device *dev)
1142{
1143 struct gfar_private *priv = netdev_priv(dev);
1144
1145 netif_tx_stop_all_queues(dev);
1146
1147 smp_mb__before_atomic();
1148 set_bit(GFAR_DOWN, &priv->state);
1149 smp_mb__after_atomic();
1150
1151 disable_napi(priv);
1152
1153
1154 gfar_halt(priv);
1155
1156 phy_stop(dev->phydev);
1157
1158 free_skb_resources(priv);
1159}
1160
1161static void gfar_start(struct gfar_private *priv)
1162{
1163 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1164 u32 tempval;
1165 int i = 0;
1166
1167
	gfar_write(&regs->rqueue, priv->rqueue);
	gfar_write(&regs->tqueue, priv->tqueue);
1170
1171
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);
1175
1176
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);
1180
1181 for (i = 0; i < priv->num_grps; i++) {
1182 regs = priv->gfargrp[i].regs;
1183
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1186 }
1187
1188
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
1192
1193 gfar_ints_enable(priv);
1194
1195 netif_trans_update(priv->ndev);
1196}
1197
1198static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
1199{
1200 struct page *page;
1201 dma_addr_t addr;
1202
1203 page = dev_alloc_page();
1204 if (unlikely(!page))
1205 return false;
1206
1207 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
1208 if (unlikely(dma_mapping_error(rxq->dev, addr))) {
1209 __free_page(page);
1210
1211 return false;
1212 }
1213
1214 rxb->dma = addr;
1215 rxb->page = page;
1216 rxb->page_offset = 0;
1217
1218 return true;
1219}
1220
1221static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
1222{
1223 struct gfar_private *priv = netdev_priv(rx_queue->ndev);
1224 struct gfar_extra_stats *estats = &priv->extra_stats;
1225
1226 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
1227 atomic64_inc(&estats->rx_alloc_err);
1228}
1229
1230static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
1231 int alloc_cnt)
1232{
1233 struct rxbd8 *bdp;
1234 struct gfar_rx_buff *rxb;
1235 int i;
1236
1237 i = rx_queue->next_to_use;
1238 bdp = &rx_queue->rx_bd_base[i];
1239 rxb = &rx_queue->rx_buff[i];
1240
1241 while (alloc_cnt--) {
1242
1243 if (unlikely(!rxb->page)) {
1244 if (unlikely(!gfar_new_page(rx_queue, rxb))) {
1245 gfar_rx_alloc_err(rx_queue);
1246 break;
1247 }
1248 }
1249
1250
1251 gfar_init_rxbdp(rx_queue, bdp,
1252 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
1253
1254
1255 bdp++;
1256 rxb++;
1257
1258 if (unlikely(++i == rx_queue->rx_ring_size)) {
1259 i = 0;
1260 bdp = rx_queue->rx_bd_base;
1261 rxb = rx_queue->rx_buff;
1262 }
1263 }
1264
1265 rx_queue->next_to_use = i;
1266 rx_queue->next_to_alloc = i;
1267}
1268
1269static void gfar_init_bds(struct net_device *ndev)
1270{
1271 struct gfar_private *priv = netdev_priv(ndev);
1272 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1273 struct gfar_priv_tx_q *tx_queue = NULL;
1274 struct gfar_priv_rx_q *rx_queue = NULL;
1275 struct txbd8 *txbdp;
1276 u32 __iomem *rfbptr;
1277 int i, j;
1278
1279 for (i = 0; i < priv->num_tx_queues; i++) {
1280 tx_queue = priv->tx_queue[i];
1281
1282 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
1283 tx_queue->dirty_tx = tx_queue->tx_bd_base;
1284 tx_queue->cur_tx = tx_queue->tx_bd_base;
1285 tx_queue->skb_curtx = 0;
1286 tx_queue->skb_dirtytx = 0;
1287
1288
1289 txbdp = tx_queue->tx_bd_base;
1290 for (j = 0; j < tx_queue->tx_ring_size; j++) {
1291 txbdp->lstatus = 0;
1292 txbdp->bufPtr = 0;
1293 txbdp++;
1294 }
1295
1296
1297 txbdp--;
1298 txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
1299 TXBD_WRAP);
1300 }
1301
	rfbptr = &regs->rfbptr0;
1303 for (i = 0; i < priv->num_rx_queues; i++) {
1304 rx_queue = priv->rx_queue[i];
1305
1306 rx_queue->next_to_clean = 0;
1307 rx_queue->next_to_use = 0;
1308 rx_queue->next_to_alloc = 0;
1309
1310
1311
1312
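		/* Fill every unused slot in the Rx ring with a fresh,
		 * DMA-mapped page buffer.
		 */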
1313 gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
1314
1315 rx_queue->rfbptr = rfbptr;
1316 rfbptr += 2;
1317 }
1318}
1319
1320static int gfar_alloc_skb_resources(struct net_device *ndev)
1321{
1322 void *vaddr;
1323 dma_addr_t addr;
1324 int i, j;
1325 struct gfar_private *priv = netdev_priv(ndev);
1326 struct device *dev = priv->dev;
1327 struct gfar_priv_tx_q *tx_queue = NULL;
1328 struct gfar_priv_rx_q *rx_queue = NULL;
1329
1330 priv->total_tx_ring_size = 0;
1331 for (i = 0; i < priv->num_tx_queues; i++)
1332 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
1333
1334 priv->total_rx_ring_size = 0;
1335 for (i = 0; i < priv->num_rx_queues; i++)
1336 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
1337
1338
1339 vaddr = dma_alloc_coherent(dev,
1340 (priv->total_tx_ring_size *
1341 sizeof(struct txbd8)) +
1342 (priv->total_rx_ring_size *
1343 sizeof(struct rxbd8)),
1344 &addr, GFP_KERNEL);
1345 if (!vaddr)
1346 return -ENOMEM;
1347
1348 for (i = 0; i < priv->num_tx_queues; i++) {
1349 tx_queue = priv->tx_queue[i];
1350 tx_queue->tx_bd_base = vaddr;
1351 tx_queue->tx_bd_dma_base = addr;
1352 tx_queue->dev = ndev;
1353
1354 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
1355 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
1356 }
1357
1358
1359 for (i = 0; i < priv->num_rx_queues; i++) {
1360 rx_queue = priv->rx_queue[i];
1361 rx_queue->rx_bd_base = vaddr;
1362 rx_queue->rx_bd_dma_base = addr;
1363 rx_queue->ndev = ndev;
1364 rx_queue->dev = dev;
1365 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
1366 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
1367 }
1368
1369
1370 for (i = 0; i < priv->num_tx_queues; i++) {
1371 tx_queue = priv->tx_queue[i];
1372 tx_queue->tx_skbuff =
1373 kmalloc_array(tx_queue->tx_ring_size,
1374 sizeof(*tx_queue->tx_skbuff),
1375 GFP_KERNEL);
1376 if (!tx_queue->tx_skbuff)
1377 goto cleanup;
1378
1379 for (j = 0; j < tx_queue->tx_ring_size; j++)
1380 tx_queue->tx_skbuff[j] = NULL;
1381 }
1382
1383 for (i = 0; i < priv->num_rx_queues; i++) {
1384 rx_queue = priv->rx_queue[i];
1385 rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
1386 sizeof(*rx_queue->rx_buff),
1387 GFP_KERNEL);
1388 if (!rx_queue->rx_buff)
1389 goto cleanup;
1390 }
1391
1392 gfar_init_bds(ndev);
1393
1394 return 0;
1395
1396cleanup:
1397 free_skb_resources(priv);
1398 return -ENOMEM;
1399}
1400
1401
1402int startup_gfar(struct net_device *ndev)
1403{
1404 struct gfar_private *priv = netdev_priv(ndev);
1405 int err;
1406
1407 gfar_mac_reset(priv);
1408
1409 err = gfar_alloc_skb_resources(ndev);
1410 if (err)
1411 return err;
1412
1413 gfar_init_tx_rx_base(priv);
1414
1415 smp_mb__before_atomic();
1416 clear_bit(GFAR_DOWN, &priv->state);
1417 smp_mb__after_atomic();
1418
1419
1420 gfar_start(priv);
1421
1422
1423 priv->oldlink = 0;
1424 priv->oldspeed = 0;
1425 priv->oldduplex = -1;
1426
1427 phy_start(ndev->phydev);
1428
1429 enable_napi(priv);
1430
1431 netif_tx_wake_all_queues(ndev);
1432
1433 return 0;
1434}
1435
1436static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
1437{
1438 struct net_device *ndev = priv->ndev;
1439 struct phy_device *phydev = ndev->phydev;
1440 u32 val = 0;
1441
1442 if (!phydev->duplex)
1443 return val;
1444
1445 if (!priv->pause_aneg_en) {
1446 if (priv->tx_pause_en)
1447 val |= MACCFG1_TX_FLOW;
1448 if (priv->rx_pause_en)
1449 val |= MACCFG1_RX_FLOW;
1450 } else {
1451 u16 lcl_adv, rmt_adv;
1452 u8 flowctrl;
1453
1454 rmt_adv = 0;
1455 if (phydev->pause)
1456 rmt_adv = LPA_PAUSE_CAP;
1457 if (phydev->asym_pause)
1458 rmt_adv |= LPA_PAUSE_ASYM;
1459
1460 lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1461 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1462 if (flowctrl & FLOW_CTRL_TX)
1463 val |= MACCFG1_TX_FLOW;
1464 if (flowctrl & FLOW_CTRL_RX)
1465 val |= MACCFG1_RX_FLOW;
1466 }
1467
1468 return val;
1469}
1470
1471static noinline void gfar_update_link_state(struct gfar_private *priv)
1472{
1473 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1474 struct net_device *ndev = priv->ndev;
1475 struct phy_device *phydev = ndev->phydev;
1476 struct gfar_priv_rx_q *rx_queue = NULL;
1477 int i;
1478
1479 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
1480 return;
1481
1482 if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);
1486 u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
1487
1488 if (phydev->duplex != priv->oldduplex) {
1489 if (!(phydev->duplex))
1490 tempval &= ~(MACCFG2_FULL_DUPLEX);
1491 else
1492 tempval |= MACCFG2_FULL_DUPLEX;
1493
1494 priv->oldduplex = phydev->duplex;
1495 }
1496
1497 if (phydev->speed != priv->oldspeed) {
1498 switch (phydev->speed) {
1499 case 1000:
1500 tempval =
1501 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
1502
1503 ecntrl &= ~(ECNTRL_R100);
1504 break;
1505 case 100:
1506 case 10:
1507 tempval =
1508 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
1509
1510
1511
1512
1513 if (phydev->speed == SPEED_100)
1514 ecntrl |= ECNTRL_R100;
1515 else
1516 ecntrl &= ~(ECNTRL_R100);
1517 break;
1518 default:
1519 netif_warn(priv, link, priv->ndev,
1520 "Ack! Speed (%d) is not 10/100/1000!\n",
1521 phydev->speed);
1522 break;
1523 }
1524
1525 priv->oldspeed = phydev->speed;
1526 }
1527
1528 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
1529 tempval1 |= gfar_get_flowctrl_cfg(priv);
1530
1531
1532 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
1533 for (i = 0; i < priv->num_rx_queues; i++) {
1534 u32 bdp_dma;
1535
1536 rx_queue = priv->rx_queue[i];
1537 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
1538 gfar_write(rx_queue->rfbptr, bdp_dma);
1539 }
1540
1541 priv->tx_actual_en = 1;
1542 }
1543
1544 if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
1545 priv->tx_actual_en = 0;
1546
		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);
1550
1551 if (!priv->oldlink)
1552 priv->oldlink = 1;
1553
1554 } else if (priv->oldlink) {
1555 priv->oldlink = 0;
1556 priv->oldspeed = 0;
1557 priv->oldduplex = -1;
1558 }
1559
1560 if (netif_msg_link(priv))
1561 phy_print_status(phydev);
1562}
1563
1564
1565
1566
1567
1568
1569
1570static void adjust_link(struct net_device *dev)
1571{
1572 struct gfar_private *priv = netdev_priv(dev);
1573 struct phy_device *phydev = dev->phydev;
1574
1575 if (unlikely(phydev->link != priv->oldlink ||
1576 (phydev->link && (phydev->duplex != priv->oldduplex ||
1577 phydev->speed != priv->oldspeed))))
1578 gfar_update_link_state(priv);
1579}
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589static void gfar_configure_serdes(struct net_device *dev)
1590{
1591 struct gfar_private *priv = netdev_priv(dev);
1592 struct phy_device *tbiphy;
1593
1594 if (!priv->tbi_node) {
1595 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1596 "device tree specify a tbi-handle\n");
1597 return;
1598 }
1599
1600 tbiphy = of_phy_find_device(priv->tbi_node);
1601 if (!tbiphy) {
1602 dev_err(&dev->dev, "error: Could not get TBI device\n");
1603 return;
1604 }
1605
1606
1607
1608
1609
1610
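	/* If the TBI link is already up, the SerDes link is presumably
	 * configured (e.g. by the bootloader); reprogramming it here would
	 * needlessly drop the link, so leave it alone.
	 */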
1611 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
1612 put_device(&tbiphy->mdio.dev);
1613 return;
1614 }
1615
1616
1617 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1618
1619 phy_write(tbiphy, MII_ADVERTISE,
1620 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1621 ADVERTISE_1000XPSE_ASYM);
1622
1623 phy_write(tbiphy, MII_BMCR,
1624 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1625 BMCR_SPEED1000);
1626
1627 put_device(&tbiphy->mdio.dev);
1628}
1629
1630
1631
1632
1633static int init_phy(struct net_device *dev)
1634{
1635 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1636 struct gfar_private *priv = netdev_priv(dev);
1637 phy_interface_t interface = priv->interface;
1638 struct phy_device *phydev;
1639 struct ethtool_eee edata;
1640
1641 linkmode_set_bit_array(phy_10_100_features_array,
1642 ARRAY_SIZE(phy_10_100_features_array),
1643 mask);
1644 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
1645 linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
1646 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1647 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
1648
1649 priv->oldlink = 0;
1650 priv->oldspeed = 0;
1651 priv->oldduplex = -1;
1652
1653 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1654 interface);
1655 if (!phydev) {
1656 dev_err(&dev->dev, "could not attach to PHY\n");
1657 return -ENODEV;
1658 }
1659
1660 if (interface == PHY_INTERFACE_MODE_SGMII)
1661 gfar_configure_serdes(dev);
1662
1663
1664 linkmode_and(phydev->supported, phydev->supported, mask);
1665 linkmode_copy(phydev->advertising, phydev->supported);
1666
1667
1668 phy_support_asym_pause(phydev);
1669
1670
1671 memset(&edata, 0, sizeof(struct ethtool_eee));
1672 phy_ethtool_set_eee(phydev, &edata);
1673
1674 return 0;
1675}
1676
1677static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1678{
1679 struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
1680
1681 memset(fcb, 0, GMAC_FCB_LEN);
1682
1683 return fcb;
1684}
1685
1686static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1687 int fcb_length)
1688{
1689
1690
1691
1692
1693 u8 flags = TXFCB_DEFAULT;
1694
1695
1696
1697
1698 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1699 flags |= TXFCB_UDP;
1700 fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
1701 } else
1702 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
1703
1704
1705
1706
1707
1708
1709 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
1710 fcb->l4os = skb_network_header_len(skb);
1711
1712 fcb->flags = flags;
1713}
1714
1715static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1716{
1717 fcb->flags |= TXFCB_VLN;
1718 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
1719}
1720
1721static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1722 struct txbd8 *base, int ring_size)
1723{
1724 struct txbd8 *new_bd = bdp + stride;
1725
1726 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1727}
1728
1729static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1730 int ring_size)
1731{
1732 return skip_txbd(bdp, 1, base, ring_size);
1733}
1734
1735
1736static inline bool gfar_csum_errata_12(struct gfar_private *priv,
1737 unsigned long fcb_addr)
1738{
1739 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
1740 (fcb_addr % 0x20) > 0x18);
1741}
1742
1743
1744
1745
1746static inline bool gfar_csum_errata_76(struct gfar_private *priv,
1747 unsigned int len)
1748{
1749 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
1750 (len > 2500));
1751}
1752
1753
1754
1755
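/* Transmit entry point: builds the Tx frame control block (checksum, VLAN,
 * timestamping) when needed, maps the head and fragments for DMA, fills the
 * buffer descriptors and finally kicks the controller.
 */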
1756static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1757{
1758 struct gfar_private *priv = netdev_priv(dev);
1759 struct gfar_priv_tx_q *tx_queue = NULL;
1760 struct netdev_queue *txq;
1761 struct gfar __iomem *regs = NULL;
1762 struct txfcb *fcb = NULL;
1763 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
1764 u32 lstatus;
1765 skb_frag_t *frag;
1766 int i, rq = 0;
1767 int do_tstamp, do_csum, do_vlan;
1768 u32 bufaddr;
1769 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
1770
1771 rq = skb->queue_mapping;
1772 tx_queue = priv->tx_queue[rq];
1773 txq = netdev_get_tx_queue(dev, rq);
1774 base = tx_queue->tx_bd_base;
1775 regs = tx_queue->grp->regs;
1776
1777 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
1778 do_vlan = skb_vlan_tag_present(skb);
1779 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1780 priv->hwts_tx_en;
1781
1782 if (do_csum || do_vlan)
1783 fcb_len = GMAC_FCB_LEN;
1784
1785
1786 if (unlikely(do_tstamp))
1787 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
1788
1789
1790 if (fcb_len) {
1791 if (unlikely(skb_cow_head(skb, fcb_len))) {
1792 dev->stats.tx_errors++;
1793 dev_kfree_skb_any(skb);
1794 return NETDEV_TX_OK;
1795 }
1796 }
1797
1798
1799 nr_frags = skb_shinfo(skb)->nr_frags;
1800
1801
1802 if (unlikely(do_tstamp))
1803 nr_txbds = nr_frags + 2;
1804 else
1805 nr_txbds = nr_frags + 1;
1806
1807
1808 if (nr_txbds > tx_queue->num_txbdfree) {
1809
1810 netif_tx_stop_queue(txq);
1811 dev->stats.tx_fifo_errors++;
1812 return NETDEV_TX_BUSY;
1813 }
1814
1815
1816 bytes_sent = skb->len;
1817 tx_queue->stats.tx_bytes += bytes_sent;
1818
1819 GFAR_CB(skb)->bytes_sent = bytes_sent;
1820 tx_queue->stats.tx_packets++;
1821
1822 txbdp = txbdp_start = tx_queue->cur_tx;
1823 lstatus = be32_to_cpu(txbdp->lstatus);
1824
1825
1826 if (unlikely(do_tstamp)) {
1827 skb_push(skb, GMAC_TXPAL_LEN);
1828 memset(skb->data, 0, GMAC_TXPAL_LEN);
1829 }
1830
1831
1832 if (fcb_len) {
1833 fcb = gfar_add_fcb(skb);
1834 lstatus |= BD_LFLAG(TXBD_TOE);
1835 }
1836
1837
1838 if (do_csum) {
1839 gfar_tx_checksum(skb, fcb, fcb_len);
1840
1841 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
1842 unlikely(gfar_csum_errata_76(priv, skb->len))) {
1843 __skb_pull(skb, GMAC_FCB_LEN);
1844 skb_checksum_help(skb);
1845 if (do_vlan || do_tstamp) {
1846
1847 fcb = gfar_add_fcb(skb);
1848 } else {
1849
1850 lstatus &= ~(BD_LFLAG(TXBD_TOE));
1851 fcb = NULL;
1852 }
1853 }
1854 }
1855
1856 if (do_vlan)
1857 gfar_tx_vlan(skb, fcb);
1858
1859 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
1860 DMA_TO_DEVICE);
1861 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1862 goto dma_map_err;
1863
1864 txbdp_start->bufPtr = cpu_to_be32(bufaddr);
1865
1866
1867 if (unlikely(do_tstamp))
1868 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
1869 tx_queue->tx_ring_size);
1870
1871 if (likely(!nr_frags)) {
1872 if (likely(!do_tstamp))
1873 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1874 } else {
1875 u32 lstatus_start = lstatus;
1876
1877
1878 frag = &skb_shinfo(skb)->frags[0];
1879 for (i = 0; i < nr_frags; i++, frag++) {
1880 unsigned int size;
1881
1882
1883 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1884
1885 size = skb_frag_size(frag);
1886
1887 lstatus = be32_to_cpu(txbdp->lstatus) | size |
1888 BD_LFLAG(TXBD_READY);
1889
1890
1891 if (i == nr_frags - 1)
1892 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1893
1894 bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
1895 size, DMA_TO_DEVICE);
1896 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1897 goto dma_map_err;
1898
1899
1900 txbdp->bufPtr = cpu_to_be32(bufaddr);
1901 txbdp->lstatus = cpu_to_be32(lstatus);
1902 }
1903
1904 lstatus = lstatus_start;
1905 }
1906
1907
1908
1909
1910
1911
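	/* With HW timestamping an extra BD is used: the first descriptor
	 * carries only the frame control block, while this second one points
	 * at the actual frame data.
	 */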
1912 if (unlikely(do_tstamp)) {
1913 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
1914
1915 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
1916 bufaddr += fcb_len;
1917
1918 lstatus_ts |= BD_LFLAG(TXBD_READY) |
1919 (skb_headlen(skb) - fcb_len);
1920 if (!nr_frags)
1921 lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1922
1923 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
1924 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
1925 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
1926
1927
1928 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1929 fcb->ptp = 1;
1930 } else {
1931 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1932 }
1933
1934 netdev_tx_sent_queue(txq, bytes_sent);
1935
1936 gfar_wmb();
1937
1938 txbdp_start->lstatus = cpu_to_be32(lstatus);
1939
1940 gfar_wmb();
1941
1942 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1943
1944
1945
1946
1947 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1948 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1949
1950 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1951
1952
1953
1954
1955
1956
1957 spin_lock_bh(&tx_queue->txlock);
1958
1959 tx_queue->num_txbdfree -= (nr_txbds);
1960 spin_unlock_bh(&tx_queue->txlock);
1961
1962
1963
1964
1965 if (!tx_queue->num_txbdfree) {
1966 netif_tx_stop_queue(txq);
1967
1968 dev->stats.tx_fifo_errors++;
1969 }
1970
1971
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1973
1974 return NETDEV_TX_OK;
1975
1976dma_map_err:
1977 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
1978 if (do_tstamp)
1979 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1980 for (i = 0; i < nr_frags; i++) {
1981 lstatus = be32_to_cpu(txbdp->lstatus);
1982 if (!(lstatus & BD_LFLAG(TXBD_READY)))
1983 break;
1984
1985 lstatus &= ~BD_LFLAG(TXBD_READY);
1986 txbdp->lstatus = cpu_to_be32(lstatus);
1987 bufaddr = be32_to_cpu(txbdp->bufPtr);
1988 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
1989 DMA_TO_DEVICE);
1990 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1991 }
1992 gfar_wmb();
1993 dev_kfree_skb_any(skb);
1994 return NETDEV_TX_OK;
1995}
1996
1997
1998static int gfar_set_mac_address(struct net_device *dev)
1999{
2000 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2001
2002 return 0;
2003}
2004
2005static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2006{
2007 struct gfar_private *priv = netdev_priv(dev);
2008
2009 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2010 cpu_relax();
2011
2012 if (dev->flags & IFF_UP)
2013 stop_gfar(dev);
2014
2015 dev->mtu = new_mtu;
2016
2017 if (dev->flags & IFF_UP)
2018 startup_gfar(dev);
2019
2020 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2021
2022 return 0;
2023}
2024
2025static void reset_gfar(struct net_device *ndev)
2026{
2027 struct gfar_private *priv = netdev_priv(ndev);
2028
2029 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2030 cpu_relax();
2031
2032 stop_gfar(ndev);
2033 startup_gfar(ndev);
2034
2035 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2036}
2037
2038
2039
2040
2041
2042
2043static void gfar_reset_task(struct work_struct *work)
2044{
2045 struct gfar_private *priv = container_of(work, struct gfar_private,
2046 reset_task);
2047 reset_gfar(priv->ndev);
2048}
2049
2050static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
2051{
2052 struct gfar_private *priv = netdev_priv(dev);
2053
2054 dev->stats.tx_errors++;
2055 schedule_work(&priv->reset_task);
2056}
2057
2058static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
2059{
2060 struct hwtstamp_config config;
2061 struct gfar_private *priv = netdev_priv(netdev);
2062
2063 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2064 return -EFAULT;
2065
2066
2067 if (config.flags)
2068 return -EINVAL;
2069
2070 switch (config.tx_type) {
2071 case HWTSTAMP_TX_OFF:
2072 priv->hwts_tx_en = 0;
2073 break;
2074 case HWTSTAMP_TX_ON:
2075 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2076 return -ERANGE;
2077 priv->hwts_tx_en = 1;
2078 break;
2079 default:
2080 return -ERANGE;
2081 }
2082
2083 switch (config.rx_filter) {
2084 case HWTSTAMP_FILTER_NONE:
2085 if (priv->hwts_rx_en) {
2086 priv->hwts_rx_en = 0;
2087 reset_gfar(netdev);
2088 }
2089 break;
2090 default:
2091 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2092 return -ERANGE;
2093 if (!priv->hwts_rx_en) {
2094 priv->hwts_rx_en = 1;
2095 reset_gfar(netdev);
2096 }
2097 config.rx_filter = HWTSTAMP_FILTER_ALL;
2098 break;
2099 }
2100
2101 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2102 -EFAULT : 0;
2103}
2104
2105static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
2106{
2107 struct hwtstamp_config config;
2108 struct gfar_private *priv = netdev_priv(netdev);
2109
2110 config.flags = 0;
2111 config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2112 config.rx_filter = (priv->hwts_rx_en ?
2113 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
2114
2115 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2116 -EFAULT : 0;
2117}
2118
2119static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2120{
2121 struct phy_device *phydev = dev->phydev;
2122
2123 if (!netif_running(dev))
2124 return -EINVAL;
2125
2126 if (cmd == SIOCSHWTSTAMP)
2127 return gfar_hwtstamp_set(dev, rq);
2128 if (cmd == SIOCGHWTSTAMP)
2129 return gfar_hwtstamp_get(dev, rq);
2130
2131 if (!phydev)
2132 return -ENODEV;
2133
2134 return phy_mii_ioctl(phydev, rq, cmd);
2135}
2136
2137
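/* Reclaim completed Tx descriptors: unmap the buffers, deliver any hardware
 * Tx timestamps, free the skbs and wake the queue if it had been stopped.
 */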
2138static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2139{
2140 struct net_device *dev = tx_queue->dev;
2141 struct netdev_queue *txq;
2142 struct gfar_private *priv = netdev_priv(dev);
2143 struct txbd8 *bdp, *next = NULL;
2144 struct txbd8 *lbdp = NULL;
2145 struct txbd8 *base = tx_queue->tx_bd_base;
2146 struct sk_buff *skb;
2147 int skb_dirtytx;
2148 int tx_ring_size = tx_queue->tx_ring_size;
2149 int frags = 0, nr_txbds = 0;
2150 int i;
2151 int howmany = 0;
2152 int tqi = tx_queue->qindex;
2153 unsigned int bytes_sent = 0;
2154 u32 lstatus;
2155 size_t buflen;
2156
2157 txq = netdev_get_tx_queue(dev, tqi);
2158 bdp = tx_queue->dirty_tx;
2159 skb_dirtytx = tx_queue->skb_dirtytx;
2160
2161 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2162 bool do_tstamp;
2163
2164 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2165 priv->hwts_tx_en;
2166
2167 frags = skb_shinfo(skb)->nr_frags;
2168
2169
2170
2171
2172 if (unlikely(do_tstamp))
2173 nr_txbds = frags + 2;
2174 else
2175 nr_txbds = frags + 1;
2176
2177 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2178
2179 lstatus = be32_to_cpu(lbdp->lstatus);
2180
2181
2182 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2183 (lstatus & BD_LENGTH_MASK))
2184 break;
2185
2186 if (unlikely(do_tstamp)) {
2187 next = next_txbd(bdp, base, tx_ring_size);
2188 buflen = be16_to_cpu(next->length) +
2189 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2190 } else
2191 buflen = be16_to_cpu(bdp->length);
2192
2193 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2194 buflen, DMA_TO_DEVICE);
2195
2196 if (unlikely(do_tstamp)) {
2197 struct skb_shared_hwtstamps shhwtstamps;
2198 u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2199 ~0x7UL);
2200
2201 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2202 shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2203 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2204 skb_tstamp_tx(skb, &shhwtstamps);
2205 gfar_clear_txbd_status(bdp);
2206 bdp = next;
2207 }
2208
2209 gfar_clear_txbd_status(bdp);
2210 bdp = next_txbd(bdp, base, tx_ring_size);
2211
2212 for (i = 0; i < frags; i++) {
2213 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2214 be16_to_cpu(bdp->length),
2215 DMA_TO_DEVICE);
2216 gfar_clear_txbd_status(bdp);
2217 bdp = next_txbd(bdp, base, tx_ring_size);
2218 }
2219
2220 bytes_sent += GFAR_CB(skb)->bytes_sent;
2221
2222 dev_kfree_skb_any(skb);
2223
2224 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2225
2226 skb_dirtytx = (skb_dirtytx + 1) &
2227 TX_RING_MOD_MASK(tx_ring_size);
2228
2229 howmany++;
2230 spin_lock(&tx_queue->txlock);
2231 tx_queue->num_txbdfree += nr_txbds;
2232 spin_unlock(&tx_queue->txlock);
2233 }
2234
2235
2236 if (tx_queue->num_txbdfree &&
2237 netif_tx_queue_stopped(txq) &&
2238 !(test_bit(GFAR_DOWN, &priv->state)))
2239 netif_wake_subqueue(priv->ndev, tqi);
2240
2241
2242 tx_queue->skb_dirtytx = skb_dirtytx;
2243 tx_queue->dirty_tx = bdp;
2244
2245 netdev_tx_completed_queue(txq, howmany, bytes_sent);
2246}
2247
2248static void count_errors(u32 lstatus, struct net_device *ndev)
2249{
2250 struct gfar_private *priv = netdev_priv(ndev);
2251 struct net_device_stats *stats = &ndev->stats;
2252 struct gfar_extra_stats *estats = &priv->extra_stats;
2253
2254
2255 if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
2256 stats->rx_length_errors++;
2257
2258 atomic64_inc(&estats->rx_trunc);
2259
2260 return;
2261 }
2262
2263 if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
2264 stats->rx_length_errors++;
2265
2266 if (lstatus & BD_LFLAG(RXBD_LARGE))
2267 atomic64_inc(&estats->rx_large);
2268 else
2269 atomic64_inc(&estats->rx_short);
2270 }
2271 if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
2272 stats->rx_frame_errors++;
2273 atomic64_inc(&estats->rx_nonoctet);
2274 }
2275 if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
2276 atomic64_inc(&estats->rx_crcerr);
2277 stats->rx_crc_errors++;
2278 }
2279 if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
2280 atomic64_inc(&estats->rx_overrun);
2281 stats->rx_over_errors++;
2282 }
2283}
2284
2285static irqreturn_t gfar_receive(int irq, void *grp_id)
2286{
2287 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2288 unsigned long flags;
2289 u32 imask, ievent;
2290
2291 ievent = gfar_read(&grp->regs->ievent);
2292
2293 if (unlikely(ievent & IEVENT_FGPI)) {
2294 gfar_write(&grp->regs->ievent, IEVENT_FGPI);
2295 return IRQ_HANDLED;
2296 }
2297
2298 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2299 spin_lock_irqsave(&grp->grplock, flags);
2300 imask = gfar_read(&grp->regs->imask);
2301 imask &= IMASK_RX_DISABLED;
2302 gfar_write(&grp->regs->imask, imask);
2303 spin_unlock_irqrestore(&grp->grplock, flags);
2304 __napi_schedule(&grp->napi_rx);
2305 } else {
2306
2307
2308
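		/* NAPI poll is already scheduled; just clear the Rx events so
		 * the interrupt does not fire again for frames that are
		 * already pending.
		 */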
2309 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2310 }
2311
2312 return IRQ_HANDLED;
2313}
2314
2315
2316static irqreturn_t gfar_transmit(int irq, void *grp_id)
2317{
2318 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2319 unsigned long flags;
2320 u32 imask;
2321
2322 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2323 spin_lock_irqsave(&grp->grplock, flags);
2324 imask = gfar_read(&grp->regs->imask);
2325 imask &= IMASK_TX_DISABLED;
2326 gfar_write(&grp->regs->imask, imask);
2327 spin_unlock_irqrestore(&grp->grplock, flags);
2328 __napi_schedule(&grp->napi_tx);
2329 } else {
2330
2331
2332
2333 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2334 }
2335
2336 return IRQ_HANDLED;
2337}
2338
2339static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2340 struct sk_buff *skb, bool first)
2341{
2342 int size = lstatus & BD_LENGTH_MASK;
2343 struct page *page = rxb->page;
2344
2345 if (likely(first)) {
2346 skb_put(skb, size);
2347 } else {
2348
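/* the last fragment's length field holds the full frame length */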
2349 if (lstatus & BD_LFLAG(RXBD_LAST))
2350 size -= skb->len;
2351
2352 WARN(size < 0, "gianfar: rx fragment size underflow");
2353 if (size < 0)
2354 return false;
2355
2356 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2357 rxb->page_offset + RXBUF_ALIGNMENT,
2358 size, GFAR_RXB_TRUESIZE);
2359 }
2360
2361
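/* the page can only be recycled if we are its sole user and
 * it was not allocated from the pfmemalloc emergency reserves
 */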
2362 if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
2363 return false;
2364
2365
2366 rxb->page_offset ^= GFAR_RXB_TRUESIZE;
2367
2368 page_ref_inc(page);
2369
2370 return true;
2371}
2372
2373static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
2374 struct gfar_rx_buff *old_rxb)
2375{
2376 struct gfar_rx_buff *new_rxb;
2377 u16 nta = rxq->next_to_alloc;
2378
2379 new_rxb = &rxq->rx_buff[nta];
2380
2381
2382 nta++;
2383 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
2384
2385
2386 *new_rxb = *old_rxb;
2387
2388
2389 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
2390 old_rxb->page_offset,
2391 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2392}
2393
2394static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
2395 u32 lstatus, struct sk_buff *skb)
2396{
2397 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2398 struct page *page = rxb->page;
2399 bool first = false;
2400
2401 if (likely(!skb)) {
2402 void *buff_addr = page_address(page) + rxb->page_offset;
2403
2404 skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
2405 if (unlikely(!skb)) {
2406 gfar_rx_alloc_err(rx_queue);
2407 return NULL;
2408 }
2409 skb_reserve(skb, RXBUF_ALIGNMENT);
2410 first = true;
2411 }
2412
2413 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
2414 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2415
2416 if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
2417
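/* recycle the unused half of the page for a future refill */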
2418 gfar_reuse_rx_page(rx_queue, rxb);
2419 } else {
2420
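/* the page can't be reused, release our DMA mapping */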
2421 dma_unmap_page(rx_queue->dev, rxb->dma,
2422 PAGE_SIZE, DMA_FROM_DEVICE);
2423 }
2424
2425
2426 rxb->page = NULL;
2427
2428 return skb;
2429}
2430
2431static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2432{
2433
2434
2435
2436
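/* If the hardware found valid headers and verified the
 * checksums, tell the stack that no further checksumming
 * is necessary.
 */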
2437 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
2438 (RXFCB_CIP | RXFCB_CTU))
2439 skb->ip_summed = CHECKSUM_UNNECESSARY;
2440 else
2441 skb_checksum_none_assert(skb);
2442}
2443
2444
2445static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
2446{
2447 struct gfar_private *priv = netdev_priv(ndev);
2448 struct rxfcb *fcb = NULL;
2449
2450
2451 fcb = (struct rxfcb *)skb->data;
2452
2453
2454
2455
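/* Strip the frame control block from the head of the buffer */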
2456 if (priv->uses_rxfcb)
2457 skb_pull(skb, GMAC_FCB_LEN);
2458
2459
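/* Get the hardware receive timestamp (64-bit nanoseconds)
 * prepended to the frame data
 */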
2460 if (priv->hwts_rx_en) {
2461 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2462 u64 *ns = (u64 *) skb->data;
2463
2464 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2465 shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2466 }
2467
2468 if (priv->padding)
2469 skb_pull(skb, priv->padding);
2470
2471
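/* Trim off the FCS */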
2472 pskb_trim(skb, skb->len - ETH_FCS_LEN);
2473
2474 if (ndev->features & NETIF_F_RXCSUM)
2475 gfar_rx_checksum(skb, fcb);
2476
2477
2478
2479
2480
2481 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2482 be16_to_cpu(fcb->flags) & RXFCB_VLN)
2483 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2484 be16_to_cpu(fcb->vlctl));
2485}
2486
2487
2488
2489
2490
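/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled.
 */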
2491static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
2492 int rx_work_limit)
2493{
2494 struct net_device *ndev = rx_queue->ndev;
2495 struct gfar_private *priv = netdev_priv(ndev);
2496 struct rxbd8 *bdp;
2497 int i, howmany = 0;
2498 struct sk_buff *skb = rx_queue->skb;
2499 int cleaned_cnt = gfar_rxbd_unused(rx_queue);
2500 unsigned int total_bytes = 0, total_pkts = 0;
2501
2502
2503 i = rx_queue->next_to_clean;
2504
2505 while (rx_work_limit--) {
2506 u32 lstatus;
2507
2508 if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
2509 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2510 cleaned_cnt = 0;
2511 }
2512
2513 bdp = &rx_queue->rx_bd_base[i];
2514 lstatus = be32_to_cpu(bdp->lstatus);
2515 if (lstatus & BD_LFLAG(RXBD_EMPTY))
2516 break;
2517
2518
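/* a new frame starts while a previous one is still being
 * assembled: its RXBD_LAST descriptor was lost to an overrun,
 * so drop the partial frame
 */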
2519 if (skb &&
2520 (lstatus & BD_LFLAG(RXBD_FIRST))) {
2521
2522 dev_kfree_skb(skb);
2523 skb = NULL;
2524 rx_queue->stats.rx_dropped++;
2525
2526
2527 }
2528
2529
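/* order rx buffer descriptor reads */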
2530 rmb();
2531
2532
2533 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
2534 if (unlikely(!skb))
2535 break;
2536
2537 cleaned_cnt++;
2538 howmany++;
2539
2540 if (unlikely(++i == rx_queue->rx_ring_size))
2541 i = 0;
2542
2543 rx_queue->next_to_clean = i;
2544
2545
2546 if (!(lstatus & BD_LFLAG(RXBD_LAST)))
2547 continue;
2548
2549 if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
2550 count_errors(lstatus, ndev);
2551
2552
2553 dev_kfree_skb(skb);
2554 skb = NULL;
2555 rx_queue->stats.rx_dropped++;
2556 continue;
2557 }
2558
2559 gfar_process_frame(ndev, skb);
2560
2561
2562 total_pkts++;
2563 total_bytes += skb->len;
2564
2565 skb_record_rx_queue(skb, rx_queue->qindex);
2566
2567 skb->protocol = eth_type_trans(skb, ndev);
2568
2569
2570 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2571
2572 skb = NULL;
2573 }
2574
2575
2576 rx_queue->skb = skb;
2577
2578 rx_queue->stats.rx_packets += total_pkts;
2579 rx_queue->stats.rx_bytes += total_bytes;
2580
2581 if (cleaned_cnt)
2582 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2583
2584
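/* Update the last free RxBD pointer so lossless flow control
 * (LFC) can track how many buffers are available
 */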
2585 if (unlikely(priv->tx_actual_en)) {
2586 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2587
2588 gfar_write(rx_queue->rfbptr, bdp_dma);
2589 }
2590
2591 return howmany;
2592}
2593
2594static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2595{
2596 struct gfar_priv_grp *gfargrp =
2597 container_of(napi, struct gfar_priv_grp, napi_rx);
2598 struct gfar __iomem *regs = gfargrp->regs;
2599 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2600 int work_done = 0;
2601
2602
2603
2604
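/* Clear IEVENT, so interrupts aren't called again
 * because of the packets that have already arrived
 */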
2605 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2606
2607 work_done = gfar_clean_rx_ring(rx_queue, budget);
2608
2609 if (work_done < budget) {
2610 u32 imask;
2611 napi_complete_done(napi, work_done);
2612
2613 gfar_write(&regs->rstat, gfargrp->rstat);
2614
2615 spin_lock_irq(&gfargrp->grplock);
2616 imask = gfar_read(&regs->imask);
2617 imask |= IMASK_RX_DEFAULT;
2618 gfar_write(&regs->imask, imask);
2619 spin_unlock_irq(&gfargrp->grplock);
2620 }
2621
2622 return work_done;
2623}
2624
2625static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2626{
2627 struct gfar_priv_grp *gfargrp =
2628 container_of(napi, struct gfar_priv_grp, napi_tx);
2629 struct gfar __iomem *regs = gfargrp->regs;
2630 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2631 u32 imask;
2632
2633
2634
2635
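/* Clear IEVENT so the TX interrupt isn't raised again for
 * work that is handled below
 */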
2636 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2637
2638
2639 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2640 gfar_clean_tx_ring(tx_queue);
2641
2642 napi_complete(napi);
2643
2644 spin_lock_irq(&gfargrp->grplock);
2645 imask = gfar_read(&regs->imask);
2646 imask |= IMASK_TX_DEFAULT;
2647 gfar_write(&regs->imask, imask);
2648 spin_unlock_irq(&gfargrp->grplock);
2649
2650 return 0;
2651}
2652
2653
2654static irqreturn_t gfar_error(int irq, void *grp_id)
2655{
2656 struct gfar_priv_grp *gfargrp = grp_id;
2657 struct gfar __iomem *regs = gfargrp->regs;
2658 struct gfar_private *priv = gfargrp->priv;
2659 struct net_device *dev = priv->ndev;
2660
2661
2662 u32 events = gfar_read(&regs->ievent);
2663
2664
2665 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2666
2667
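/* Magic Packet is not an error, don't count it */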
2668 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
2669 (events & IEVENT_MAG))
2670 events &= ~IEVENT_MAG;
2671
2672
2673 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2674 netdev_dbg(dev,
2675 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
2676 events, gfar_read(&regs->imask));
2677
2678
2679 if (events & IEVENT_TXE) {
2680 dev->stats.tx_errors++;
2681
2682 if (events & IEVENT_LC)
2683 dev->stats.tx_window_errors++;
2684 if (events & IEVENT_CRL)
2685 dev->stats.tx_aborted_errors++;
2686 if (events & IEVENT_XFUN) {
2687 netif_dbg(priv, tx_err, dev,
2688 "TX FIFO underrun, packet dropped\n");
2689 dev->stats.tx_dropped++;
2690 atomic64_inc(&priv->extra_stats.tx_underrun);
2691
2692 schedule_work(&priv->reset_task);
2693 }
2694 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
2695 }
2696 if (events & IEVENT_BSY) {
2697 dev->stats.rx_over_errors++;
2698 atomic64_inc(&priv->extra_stats.rx_bsy);
2699
2700 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
2701 gfar_read(&regs->rstat));
2702 }
2703 if (events & IEVENT_BABR) {
2704 dev->stats.rx_errors++;
2705 atomic64_inc(&priv->extra_stats.rx_babr);
2706
2707 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
2708 }
2709 if (events & IEVENT_EBERR) {
2710 atomic64_inc(&priv->extra_stats.eberr);
2711 netif_dbg(priv, rx_err, dev, "bus error\n");
2712 }
2713 if (events & IEVENT_RXC)
2714 netif_dbg(priv, rx_status, dev, "control frame\n");
2715
2716 if (events & IEVENT_BABT) {
2717 atomic64_inc(&priv->extra_stats.tx_babt);
2718 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
2719 }
2720 return IRQ_HANDLED;
2721}
2722
2723
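/* The interrupt handler for devices with one interrupt */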
2724static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2725{
2726 struct gfar_priv_grp *gfargrp = grp_id;
2727
2728
2729 u32 events = gfar_read(&gfargrp->regs->ievent);
2730
2731
2732 if (events & IEVENT_RX_MASK)
2733 gfar_receive(irq, grp_id);
2734
2735
2736 if (events & IEVENT_TX_MASK)
2737 gfar_transmit(irq, grp_id);
2738
2739
2740 if (events & IEVENT_ERR_MASK)
2741 gfar_error(irq, grp_id);
2742
2743 return IRQ_HANDLED;
2744}
2745
2746#ifdef CONFIG_NET_POLL_CONTROLLER
2747
2748
2749
2750
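/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */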
2751static void gfar_netpoll(struct net_device *dev)
2752{
2753 struct gfar_private *priv = netdev_priv(dev);
2754 int i;
2755
2756
2757 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2758 for (i = 0; i < priv->num_grps; i++) {
2759 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2760
2761 disable_irq(gfar_irq(grp, TX)->irq);
2762 disable_irq(gfar_irq(grp, RX)->irq);
2763 disable_irq(gfar_irq(grp, ER)->irq);
2764 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2765 enable_irq(gfar_irq(grp, ER)->irq);
2766 enable_irq(gfar_irq(grp, RX)->irq);
2767 enable_irq(gfar_irq(grp, TX)->irq);
2768 }
2769 } else {
2770 for (i = 0; i < priv->num_grps; i++) {
2771 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2772
2773 disable_irq(gfar_irq(grp, TX)->irq);
2774 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2775 enable_irq(gfar_irq(grp, TX)->irq);
2776 }
2777 }
2778}
2779#endif
2780
2781static void free_grp_irqs(struct gfar_priv_grp *grp)
2782{
2783 free_irq(gfar_irq(grp, TX)->irq, grp);
2784 free_irq(gfar_irq(grp, RX)->irq, grp);
2785 free_irq(gfar_irq(grp, ER)->irq, grp);
2786}
2787
2788static int register_grp_irqs(struct gfar_priv_grp *grp)
2789{
2790 struct gfar_private *priv = grp->priv;
2791 struct net_device *dev = priv->ndev;
2792 int err;
2793
2794
2795
2796
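/* If the device has multiple interrupts, register for
 * them.  Otherwise, only register for the one
 */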
2797 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2798
2799
2800
2801 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
2802 gfar_irq(grp, ER)->name, grp);
2803 if (err < 0) {
2804 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2805 gfar_irq(grp, ER)->irq);
2806
2807 goto err_irq_fail;
2808 }
2809 enable_irq_wake(gfar_irq(grp, ER)->irq);
2810
2811 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2812 gfar_irq(grp, TX)->name, grp);
2813 if (err < 0) {
2814 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2815 gfar_irq(grp, TX)->irq);
2816 goto tx_irq_fail;
2817 }
2818 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2819 gfar_irq(grp, RX)->name, grp);
2820 if (err < 0) {
2821 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2822 gfar_irq(grp, RX)->irq);
2823 goto rx_irq_fail;
2824 }
2825 enable_irq_wake(gfar_irq(grp, RX)->irq);
2826
2827 } else {
2828 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
2829 gfar_irq(grp, TX)->name, grp);
2830 if (err < 0) {
2831 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2832 gfar_irq(grp, TX)->irq);
2833 goto err_irq_fail;
2834 }
2835 enable_irq_wake(gfar_irq(grp, TX)->irq);
2836 }
2837
2838 return 0;
2839
2840rx_irq_fail:
2841 free_irq(gfar_irq(grp, TX)->irq, grp);
2842tx_irq_fail:
2843 free_irq(gfar_irq(grp, ER)->irq, grp);
2844err_irq_fail:
2845 return err;
2846
2847}
2848
2849static void gfar_free_irq(struct gfar_private *priv)
2850{
2851 int i;
2852
2853
2854 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2855 for (i = 0; i < priv->num_grps; i++)
2856 free_grp_irqs(&priv->gfargrp[i]);
2857 } else {
2858 for (i = 0; i < priv->num_grps; i++)
2859 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2860 &priv->gfargrp[i]);
2861 }
2862}
2863
2864static int gfar_request_irq(struct gfar_private *priv)
2865{
2866 int err, i, j;
2867
2868 for (i = 0; i < priv->num_grps; i++) {
2869 err = register_grp_irqs(&priv->gfargrp[i]);
2870 if (err) {
2871 for (j = 0; j < i; j++)
2872 free_grp_irqs(&priv->gfargrp[j]);
2873 return err;
2874 }
2875 }
2876
2877 return 0;
2878}
2879
2880
2881
2882
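/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */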
2883static int gfar_enet_open(struct net_device *dev)
2884{
2885 struct gfar_private *priv = netdev_priv(dev);
2886 int err;
2887
2888 err = init_phy(dev);
2889 if (err)
2890 return err;
2891
2892 err = gfar_request_irq(priv);
2893 if (err)
2894 return err;
2895
2896 err = startup_gfar(dev);
2897 if (err)
2898 return err;
2899
2900 return err;
2901}
2902
2903
2904static int gfar_close(struct net_device *dev)
2905{
2906 struct gfar_private *priv = netdev_priv(dev);
2907
2908 cancel_work_sync(&priv->reset_task);
2909 stop_gfar(dev);
2910
2911
2912 phy_disconnect(dev->phydev);
2913
2914 gfar_free_irq(priv);
2915
2916 return 0;
2917}
2918
2919
2920
2921
2922static void gfar_clear_exact_match(struct net_device *dev)
2923{
2924 int idx;
2925 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
2926
2927 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
2928 gfar_set_mac_for_addr(dev, idx, zero_arr);
2929}
2930
2931
2932
2933
2934
2935
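/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed)
 */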
2936static void gfar_set_multi(struct net_device *dev)
2937{
2938 struct netdev_hw_addr *ha;
2939 struct gfar_private *priv = netdev_priv(dev);
2940 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2941 u32 tempval;
2942
2943 if (dev->flags & IFF_PROMISC) {
2944
2945 tempval = gfar_read(&regs->rctrl);
2946 tempval |= RCTRL_PROM;
2947 gfar_write(&regs->rctrl, tempval);
2948 } else {
2949
2950 tempval = gfar_read(&regs->rctrl);
2951 tempval &= ~(RCTRL_PROM);
2952 gfar_write(&regs->rctrl, tempval);
2953 }
2954
2955 if (dev->flags & IFF_ALLMULTI) {
2956
2957 gfar_write(&regs->igaddr0, 0xffffffff);
2958 gfar_write(&regs->igaddr1, 0xffffffff);
2959 gfar_write(&regs->igaddr2, 0xffffffff);
2960 gfar_write(&regs->igaddr3, 0xffffffff);
2961 gfar_write(&regs->igaddr4, 0xffffffff);
2962 gfar_write(&regs->igaddr5, 0xffffffff);
2963 gfar_write(&regs->igaddr6, 0xffffffff);
2964 gfar_write(&regs->igaddr7, 0xffffffff);
2965 gfar_write(&regs->gaddr0, 0xffffffff);
2966 gfar_write(&regs->gaddr1, 0xffffffff);
2967 gfar_write(&regs->gaddr2, 0xffffffff);
2968 gfar_write(&regs->gaddr3, 0xffffffff);
2969 gfar_write(&regs->gaddr4, 0xffffffff);
2970 gfar_write(&regs->gaddr5, 0xffffffff);
2971 gfar_write(&regs->gaddr6, 0xffffffff);
2972 gfar_write(&regs->gaddr7, 0xffffffff);
2973 } else {
2974 int em_num;
2975 int idx;
2976
2977
2978 gfar_write(&regs->igaddr0, 0x0);
2979 gfar_write(&regs->igaddr1, 0x0);
2980 gfar_write(&regs->igaddr2, 0x0);
2981 gfar_write(&regs->igaddr3, 0x0);
2982 gfar_write(&regs->igaddr4, 0x0);
2983 gfar_write(&regs->igaddr5, 0x0);
2984 gfar_write(&regs->igaddr6, 0x0);
2985 gfar_write(&regs->igaddr7, 0x0);
2986 gfar_write(&regs->gaddr0, 0x0);
2987 gfar_write(&regs->gaddr1, 0x0);
2988 gfar_write(&regs->gaddr2, 0x0);
2989 gfar_write(&regs->gaddr3, 0x0);
2990 gfar_write(&regs->gaddr4, 0x0);
2991 gfar_write(&regs->gaddr5, 0x0);
2992 gfar_write(&regs->gaddr6, 0x0);
2993 gfar_write(&regs->gaddr7, 0x0);
2994
2995
2996
2997
2998
2999 if (priv->extended_hash) {
3000 em_num = GFAR_EM_NUM + 1;
3001 gfar_clear_exact_match(dev);
3002 idx = 1;
3003 } else {
3004 idx = 0;
3005 em_num = 0;
3006 }
3007
3008 if (netdev_mc_empty(dev))
3009 return;
3010
3011
3012 netdev_for_each_mc_addr(ha, dev) {
3013 if (idx < em_num) {
3014 gfar_set_mac_for_addr(dev, idx, ha->addr);
3015 idx++;
3016 } else
3017 gfar_set_hash_for_addr(dev, ha->addr);
3018 }
3019 }
3020}
3021
3022void gfar_mac_reset(struct gfar_private *priv)
3023{
3024 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3025 u32 tempval;
3026
3027
3028 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
3029
3030
3031 udelay(3);
3032
3033
3034
3035
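/* the soft reset bit is not self-resetting, so we need to
 * clear it before resuming normal operation
 */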
3036 gfar_write(&regs->maccfg1, 0);
3037
3038 udelay(3);
3039
3040 gfar_rx_offload_en(priv);
3041
3042
3043 gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
3044 gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
3045
3046
3047 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
3048
3049
3050 tempval = MACCFG2_INIT_SETTINGS;
3051
3052
3053
3054
3055
3056 if (gfar_has_errata(priv, GFAR_ERRATA_74))
3057 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
3058
3059 gfar_write(&regs->maccfg2, tempval);
3060
3061
3062 gfar_write(&regs->igaddr0, 0);
3063 gfar_write(&regs->igaddr1, 0);
3064 gfar_write(&regs->igaddr2, 0);
3065 gfar_write(&regs->igaddr3, 0);
3066 gfar_write(&regs->igaddr4, 0);
3067 gfar_write(&regs->igaddr5, 0);
3068 gfar_write(&regs->igaddr6, 0);
3069 gfar_write(&regs->igaddr7, 0);
3070
3071 gfar_write(&regs->gaddr0, 0);
3072 gfar_write(&regs->gaddr1, 0);
3073 gfar_write(&regs->gaddr2, 0);
3074 gfar_write(&regs->gaddr3, 0);
3075 gfar_write(&regs->gaddr4, 0);
3076 gfar_write(&regs->gaddr5, 0);
3077 gfar_write(&regs->gaddr6, 0);
3078 gfar_write(&regs->gaddr7, 0);
3079
3080 if (priv->extended_hash)
3081 gfar_clear_exact_match(priv->ndev);
3082
3083 gfar_mac_rx_config(priv);
3084
3085 gfar_mac_tx_config(priv);
3086
3087 gfar_set_mac_address(priv->ndev);
3088
3089 gfar_set_multi(priv->ndev);
3090
3091
3092 gfar_ints_disable(priv);
3093
3094
3095 gfar_configure_coalescing_all(priv);
3096}
3097
3098static void gfar_hw_init(struct gfar_private *priv)
3099{
3100 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3101 u32 attrs;
3102
3103
3104
3105
3106 gfar_halt(priv);
3107
3108 gfar_mac_reset(priv);
3109
3110
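/* Zero out the rmon mib registers if it has them */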
3111 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
3112 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
3113
3114
3115 gfar_write(&regs->rmon.cam1, 0xffffffff);
3116 gfar_write(&regs->rmon.cam2, 0xffffffff);
3117 }
3118
3119
3120 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
3121
3122
3123 attrs = ATTRELI_EL(priv->rx_stash_size) |
3124 ATTRELI_EI(priv->rx_stash_index);
3125
3126 gfar_write(&regs->attreli, attrs);
3127
3128
3129
3130
3131 attrs = ATTR_INIT_SETTINGS;
3132
3133 if (priv->bd_stash_en)
3134 attrs |= ATTR_BDSTASH;
3135
3136 if (priv->rx_stash_size != 0)
3137 attrs |= ATTR_BUFSTASH;
3138
3139 gfar_write(&regs->attr, attrs);
3140
3141
3142 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
3143 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
3144 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
3145
3146
3147 if (priv->num_grps > 1)
3148 gfar_write_isrg(priv);
3149}
3150
3151static const struct net_device_ops gfar_netdev_ops = {
3152 .ndo_open = gfar_enet_open,
3153 .ndo_start_xmit = gfar_start_xmit,
3154 .ndo_stop = gfar_close,
3155 .ndo_change_mtu = gfar_change_mtu,
3156 .ndo_set_features = gfar_set_features,
3157 .ndo_set_rx_mode = gfar_set_multi,
3158 .ndo_tx_timeout = gfar_timeout,
3159 .ndo_do_ioctl = gfar_ioctl,
3160 .ndo_get_stats = gfar_get_stats,
3161 .ndo_change_carrier = fixed_phy_change_carrier,
3162 .ndo_set_mac_address = gfar_set_mac_addr,
3163 .ndo_validate_addr = eth_validate_addr,
3164#ifdef CONFIG_NET_POLL_CONTROLLER
3165 .ndo_poll_controller = gfar_netpoll,
3166#endif
3167};
3168
3169
3170
3171
3172static int gfar_probe(struct platform_device *ofdev)
3173{
3174 struct device_node *np = ofdev->dev.of_node;
3175 struct net_device *dev = NULL;
3176 struct gfar_private *priv = NULL;
3177 int err = 0, i;
3178
3179 err = gfar_of_init(ofdev, &dev);
3180
3181 if (err)
3182 return err;
3183
3184 priv = netdev_priv(dev);
3185 priv->ndev = dev;
3186 priv->ofdev = ofdev;
3187 priv->dev = &ofdev->dev;
3188 SET_NETDEV_DEV(dev, &ofdev->dev);
3189
3190 INIT_WORK(&priv->reset_task, gfar_reset_task);
3191
3192 platform_set_drvdata(ofdev, priv);
3193
3194 gfar_detect_errata(priv);
3195
3196
3197 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
3198
3199
3200 dev->watchdog_timeo = TX_TIMEOUT;
3201
3202 dev->mtu = 1500;
3203 dev->min_mtu = 50;
3204 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
3205 dev->netdev_ops = &gfar_netdev_ops;
3206 dev->ethtool_ops = &gfar_ethtool_ops;
3207
3208
3209 for (i = 0; i < priv->num_grps; i++) {
3210 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
3211 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
3212 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
3213 gfar_poll_tx_sq, 2);
3214 }
3215
3216 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
3217 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
3218 NETIF_F_RXCSUM;
3219 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
3220 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
3221 }
3222
3223 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
3224 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
3225 NETIF_F_HW_VLAN_CTAG_RX;
3226 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3227 }
3228
3229 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3230
3231 gfar_init_addr_hash_table(priv);
3232
3233
3234
3235
3236 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3237 priv->padding = 8 + DEFAULT_PADDING;
3238
3239 if (dev->features & NETIF_F_IP_CSUM ||
3240 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3241 dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
3242
3243
3244 for (i = 0; i < priv->num_tx_queues; i++) {
3245 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
3246 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
3247 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
3248 priv->tx_queue[i]->txic = DEFAULT_TXIC;
3249 }
3250
3251 for (i = 0; i < priv->num_rx_queues; i++) {
3252 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
3253 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
3254 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
3255 }
3256
3257
3258 priv->rx_filer_enable =
3259 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
3260
3261 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
3262
3263 if (priv->num_tx_queues == 1)
3264 priv->prio_sched_en = 1;
3265
3266 set_bit(GFAR_DOWN, &priv->state);
3267
3268 gfar_hw_init(priv);
3269
3270
3271 netif_carrier_off(dev);
3272
3273 err = register_netdev(dev);
3274
3275 if (err) {
3276 pr_err("%s: Cannot register net device, aborting\n", dev->name);
3277 goto register_fail;
3278 }
3279
3280 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
3281 priv->wol_supported |= GFAR_WOL_MAGIC;
3282
3283 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
3284 priv->rx_filer_enable)
3285 priv->wol_supported |= GFAR_WOL_FILER_UCAST;
3286
3287 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
3288
3289
3290 for (i = 0; i < priv->num_grps; i++) {
3291 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3292 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3293 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
3294 dev->name, "_g", '0' + i, "_tx");
3295 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
3296 dev->name, "_g", '0' + i, "_rx");
3297 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
3298 dev->name, "_g", '0' + i, "_er");
3299 } else
3300 strcpy(gfar_irq(grp, TX)->name, dev->name);
3301 }
3302
3303
3304 gfar_init_filer_table(priv);
3305
3306
3307 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
3308
3309
3310
3311
3312 netdev_info(dev, "Running with NAPI enabled\n");
3313 for (i = 0; i < priv->num_rx_queues; i++)
3314 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
3315 i, priv->rx_queue[i]->rx_ring_size);
3316 for (i = 0; i < priv->num_tx_queues; i++)
3317 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
3318 i, priv->tx_queue[i]->tx_ring_size);
3319
3320 return 0;
3321
3322register_fail:
3323 if (of_phy_is_fixed_link(np))
3324 of_phy_deregister_fixed_link(np);
3325 unmap_group_regs(priv);
3326 gfar_free_rx_queues(priv);
3327 gfar_free_tx_queues(priv);
3328 of_node_put(priv->phy_node);
3329 of_node_put(priv->tbi_node);
3330 free_gfar_dev(priv);
3331 return err;
3332}
3333
3334static int gfar_remove(struct platform_device *ofdev)
3335{
3336 struct gfar_private *priv = platform_get_drvdata(ofdev);
3337 struct device_node *np = ofdev->dev.of_node;
3338
3339 of_node_put(priv->phy_node);
3340 of_node_put(priv->tbi_node);
3341
3342 unregister_netdev(priv->ndev);
3343
3344 if (of_phy_is_fixed_link(np))
3345 of_phy_deregister_fixed_link(np);
3346
3347 unmap_group_regs(priv);
3348 gfar_free_rx_queues(priv);
3349 gfar_free_tx_queues(priv);
3350 free_gfar_dev(priv);
3351
3352 return 0;
3353}
3354
3355#ifdef CONFIG_PM
3356
3357static void __gfar_filer_disable(struct gfar_private *priv)
3358{
3359 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3360 u32 temp;
3361
3362 temp = gfar_read(&regs->rctrl);
3363 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
3364 gfar_write(&regs->rctrl, temp);
3365}
3366
3367static void __gfar_filer_enable(struct gfar_private *priv)
3368{
3369 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3370 u32 temp;
3371
3372 temp = gfar_read(&regs->rctrl);
3373 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
3374 gfar_write(&regs->rctrl, temp);
3375}
3376
3377
3378static void gfar_filer_config_wol(struct gfar_private *priv)
3379{
3380 unsigned int i;
3381 u32 rqfcr;
3382
3383 __gfar_filer_disable(priv);
3384
3385
3386 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
3387 for (i = 0; i <= MAX_FILER_IDX; i++)
3388 gfar_write_filer(priv, i, rqfcr, 0);
3389
3390 i = 0;
3391 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
3392
3393 struct net_device *ndev = priv->ndev;
3394
3395 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
3396 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
3397 (ndev->dev_addr[1] << 8) |
3398 ndev->dev_addr[2];
3399
3400 rqfcr = (qindex << 10) | RQFCR_AND |
3401 RQFCR_CMP_EXACT | RQFCR_PID_DAH;
3402
3403 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3404
3405 dest_mac_addr = (ndev->dev_addr[3] << 16) |
3406 (ndev->dev_addr[4] << 8) |
3407 ndev->dev_addr[5];
3408 rqfcr = (qindex << 10) | RQFCR_GPI |
3409 RQFCR_CMP_EXACT | RQFCR_PID_DAL;
3410 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3411 }
3412
3413 __gfar_filer_enable(priv);
3414}
3415
3416static void gfar_filer_restore_table(struct gfar_private *priv)
3417{
3418 u32 rqfcr, rqfpr;
3419 unsigned int i;
3420
3421 __gfar_filer_disable(priv);
3422
3423 for (i = 0; i <= MAX_FILER_IDX; i++) {
3424 rqfcr = priv->ftp_rqfcr[i];
3425 rqfpr = priv->ftp_rqfpr[i];
3426 gfar_write_filer(priv, i, rqfcr, rqfpr);
3427 }
3428
3429 __gfar_filer_enable(priv);
3430}
3431
3432
3433static void gfar_start_wol_filer(struct gfar_private *priv)
3434{
3435 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3436 u32 tempval;
3437 int i = 0;
3438
3439
3440 gfar_write(&regs->rqueue, priv->rqueue);
3441
3442
3443 tempval = gfar_read(&regs->dmactrl);
3444 tempval |= DMACTRL_INIT_SETTINGS;
3445 gfar_write(&regs->dmactrl, tempval);
3446
3447
3448 tempval = gfar_read(&regs->dmactrl);
3449 tempval &= ~DMACTRL_GRS;
3450 gfar_write(&regs->dmactrl, tempval);
3451
3452 for (i = 0; i < priv->num_grps; i++) {
3453 regs = priv->gfargrp[i].regs;
3454
3455 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
3456
3457 gfar_write(&regs->imask, IMASK_FGPI);
3458 }
3459
3460
3461 tempval = gfar_read(&regs->maccfg1);
3462 tempval |= MACCFG1_RX_EN;
3463 gfar_write(&regs->maccfg1, tempval);
3464}
3465
3466static int gfar_suspend(struct device *dev)
3467{
3468 struct gfar_private *priv = dev_get_drvdata(dev);
3469 struct net_device *ndev = priv->ndev;
3470 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3471 u32 tempval;
3472 u16 wol = priv->wol_opts;
3473
3474 if (!netif_running(ndev))
3475 return 0;
3476
3477 disable_napi(priv);
3478 netif_tx_lock(ndev);
3479 netif_device_detach(ndev);
3480 netif_tx_unlock(ndev);
3481
3482 gfar_halt(priv);
3483
3484 if (wol & GFAR_WOL_MAGIC) {
3485
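/* Enable interrupt on Magic Packet */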
3486 gfar_write(&regs->imask, IMASK_MAG);
3487
3488
3489 tempval = gfar_read(&regs->maccfg2);
3490 tempval |= MACCFG2_MPEN;
3491 gfar_write(&regs->maccfg2, tempval);
3492
3493
3494 tempval = gfar_read(&regs->maccfg1);
3495 tempval |= MACCFG1_RX_EN;
3496 gfar_write(&regs->maccfg1, tempval);
3497
3498 } else if (wol & GFAR_WOL_FILER_UCAST) {
3499 gfar_filer_config_wol(priv);
3500 gfar_start_wol_filer(priv);
3501
3502 } else {
3503 phy_stop(ndev->phydev);
3504 }
3505
3506 return 0;
3507}
3508
3509static int gfar_resume(struct device *dev)
3510{
3511 struct gfar_private *priv = dev_get_drvdata(dev);
3512 struct net_device *ndev = priv->ndev;
3513 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3514 u32 tempval;
3515 u16 wol = priv->wol_opts;
3516
3517 if (!netif_running(ndev))
3518 return 0;
3519
3520 if (wol & GFAR_WOL_MAGIC) {
3521
3522 tempval = gfar_read(&regs->maccfg2);
3523 tempval &= ~MACCFG2_MPEN;
3524 gfar_write(&regs->maccfg2, tempval);
3525
3526 } else if (wol & GFAR_WOL_FILER_UCAST) {
3527
3528 gfar_halt(priv);
3529 gfar_filer_restore_table(priv);
3530
3531 } else {
3532 phy_start(ndev->phydev);
3533 }
3534
3535 gfar_start(priv);
3536
3537 netif_device_attach(ndev);
3538 enable_napi(priv);
3539
3540 return 0;
3541}
3542
3543static int gfar_restore(struct device *dev)
3544{
3545 struct gfar_private *priv = dev_get_drvdata(dev);
3546 struct net_device *ndev = priv->ndev;
3547
3548 if (!netif_running(ndev)) {
3549 netif_device_attach(ndev);
3550
3551 return 0;
3552 }
3553
3554 gfar_init_bds(ndev);
3555
3556 gfar_mac_reset(priv);
3557
3558 gfar_init_tx_rx_base(priv);
3559
3560 gfar_start(priv);
3561
3562 priv->oldlink = 0;
3563 priv->oldspeed = 0;
3564 priv->oldduplex = -1;
3565
3566 if (ndev->phydev)
3567 phy_start(ndev->phydev);
3568
3569 netif_device_attach(ndev);
3570 enable_napi(priv);
3571
3572 return 0;
3573}
3574
3575static const struct dev_pm_ops gfar_pm_ops = {
3576 .suspend = gfar_suspend,
3577 .resume = gfar_resume,
3578 .freeze = gfar_suspend,
3579 .thaw = gfar_resume,
3580 .restore = gfar_restore,
3581};
3582
3583#define GFAR_PM_OPS (&gfar_pm_ops)
3584
3585#else
3586
3587#define GFAR_PM_OPS NULL
3588
3589#endif
3590
3591static const struct of_device_id gfar_match[] =
3592{
3593 {
3594 .type = "network",
3595 .compatible = "gianfar",
3596 },
3597 {
3598 .compatible = "fsl,etsec2",
3599 },
3600 {},
3601};
3602MODULE_DEVICE_TABLE(of, gfar_match);
3603
3604
3605static struct platform_driver gfar_driver = {
3606 .driver = {
3607 .name = "fsl-gianfar",
3608 .pm = GFAR_PM_OPS,
3609 .of_match_table = gfar_match,
3610 },
3611 .probe = gfar_probe,
3612 .remove = gfar_remove,
3613};
3614
3615module_platform_driver(gfar_driver);
3616