/*
 * PPC 4xx OCP EMAC driver ("emac") - driver core.
 *
 * Handles the EMAC MAC itself: MAL channel callbacks, PHY link polling,
 * multicast filtering, MDIO access and ethtool support.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>

#include "core.h"

#define DRV_NAME	"emac"
#define DRV_VERSION	"3.54"
#define DRV_DESC	"PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/* Minimum number of free TX descriptors required to wake up the TX queue */
#define EMAC_TX_WAKEUP_THRESH	(NUM_TX_BUFF / 4)

/* Packets shorter than this are copied into a freshly allocated skb on RX
 * instead of handing the ring buffer up the stack.
 */
#define EMAC_RX_COPY_THRESH	CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Bitmask of PHY addresses already claimed by an EMAC instance,
 * serialized by emac_phy_map_lock.
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);

/* Wait queue for EMAC instances whose probe has to wait for another
 * device (MAL, ZMII, ...) to finish probing first.
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);

#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];

/* How long to wait during probe for dependent devices to show up (jiffies) */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)

/* On chips with a PHY clock workaround enabled, enable/disable timeouts
 * are expected while the workaround is engaged, so only log them to the
 * debug channel instead of the console.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
			     EMAC_FTR_460EX_PHY_CLK_FIX |
			     EMAC_FTR_440EP_PHY_CLK_FIX))
		DBG(dev, "%s" NL, error);
	else if (net_ratelimit())
		printk(KERN_ERR "%pOF: %s\n", dev->ofdev->dev.of_node, error);
}

/* PHY RX clock workaround for 440EP-class EMACs: switch the EMAC RX clock
 * source via SDR0_MFR while the link is down (emac_rx_clk_tx) and restore
 * the default source once the link is back up (emac_rx_clk_default).
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}

static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}

/* PHY polling intervals: once a second while the link is up, five times a
 * second while it is down.
 */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us, roughly one maximum-size frame time at
 * the corresponding link speed.
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73

/* 01:80:C2:00:00:01 is the IEEE 802.3x MAC control (PAUSE) multicast
 * address; it is added to the RX filter in emac_open().
 */
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};

static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
	return phy_interface_mode_is_rgmii(phy_mode) ||
		phy_mode == PHY_INTERFACE_MODE_GMII ||
		phy_mode == PHY_INTERFACE_MODE_SGMII ||
		phy_mode == PHY_INTERFACE_MODE_TBI ||
		phy_mode == PHY_INTERFACE_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return phy_mode == PHY_INTERFACE_MODE_SGMII ||
		phy_mode == PHY_INTERFACE_MODE_TBI ||
		phy_mode == PHY_INTERFACE_MODE_RTBI;
}

/* Enable the transmitter by setting MR0_TXE, unless it is already on */
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}

/* Disable the transmitter and wait up to dev->stop_timeout us for the
 * TX idle bit (MR0_TXI) to be set, reporting a timeout error otherwise.
 */
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
243
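/* Enable the receiver.  If an asynchronous disable (emac_rx_disable_async)
 * is still in flight, wait for the RX idle bit before setting RXE; do
 * nothing if the channel was stopped by the MAL (MAL_COMMAC_RX_STOPPED).
 */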
244static void emac_rx_enable(struct emac_instance *dev)
245{
246 struct emac_regs __iomem *p = dev->emacp;
247 u32 r;
248
249 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
250 goto out;
251
252 DBG(dev, "rx_enable" NL);
253
254 r = in_be32(&p->mr0);
255 if (!(r & EMAC_MR0_RXE)) {
256 if (unlikely(!(r & EMAC_MR0_RXI))) {
257
258 int n = dev->stop_timeout;
259 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
260 udelay(1);
261 --n;
262 }
263 if (unlikely(!n))
264 emac_report_timeout_error(dev,
265 "RX disable timeout");
266 }
267 out_be32(&p->mr0, r | EMAC_MR0_RXE);
268 }
269 out:
270 ;
271}
272
273static void emac_rx_disable(struct emac_instance *dev)
274{
275 struct emac_regs __iomem *p = dev->emacp;
276 u32 r;
277
278 DBG(dev, "rx_disable" NL);
279
280 r = in_be32(&p->mr0);
281 if (r & EMAC_MR0_RXE) {
282 int n = dev->stop_timeout;
283 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
284 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
285 udelay(1);
286 --n;
287 }
288 if (unlikely(!n))
289 emac_report_timeout_error(dev, "RX disable timeout");
290 }
291}
292
293static inline void emac_netif_stop(struct emac_instance *dev)
294{
295 netif_tx_lock_bh(dev->ndev);
296 netif_addr_lock(dev->ndev);
297 dev->no_mcast = 1;
298 netif_addr_unlock(dev->ndev);
299 netif_tx_unlock_bh(dev->ndev);
300 netif_trans_update(dev->ndev);
301 mal_poll_disable(dev->mal, &dev->commac);
302 netif_tx_disable(dev->ndev);
303}
304
305static inline void emac_netif_start(struct emac_instance *dev)
306{
307 netif_tx_lock_bh(dev->ndev);
308 netif_addr_lock(dev->ndev);
309 dev->no_mcast = 0;
310 if (dev->mcast_pending && netif_running(dev->ndev))
311 __emac_set_multicast_list(dev);
312 netif_addr_unlock(dev->ndev);
313 netif_tx_unlock_bh(dev->ndev);
314
315 netif_wake_queue(dev->ndev);
316
317
318
319
320
321
322 mal_poll_enable(dev->mal, &dev->commac);
323}
324
325static inline void emac_rx_disable_async(struct emac_instance *dev)
326{
327 struct emac_regs __iomem *p = dev->emacp;
328 u32 r;
329
330 DBG(dev, "rx_disable_async" NL);
331
332 r = in_be32(&p->mr0);
333 if (r & EMAC_MR0_RXE)
334 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
335}
336
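/* Soft-reset the MAC by setting MR0_SRST and waiting for the bit to clear.
 * On 460EX-class chips (EMAC_FTR_460EX_PHY_CLK_FIX), if the reset does not
 * complete it is retried once with the internal clock source selected via
 * SDR0_ETH_CFG.
 */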
337static int emac_reset(struct emac_instance *dev)
338{
339 struct emac_regs __iomem *p = dev->emacp;
340 int n = 20;
341 bool __maybe_unused try_internal_clock = false;
342
343 DBG(dev, "reset" NL);
344
345 if (!dev->reset_failed) {
346
347
348
349 emac_rx_disable(dev);
350 emac_tx_disable(dev);
351 }
352
353#ifdef CONFIG_PPC_DCR_NATIVE
354do_retry:
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
373 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
374 dev->phy_map == 0xffffffff)) {
375
376 dcri_clrset(SDR0, SDR0_ETH_CFG,
377 0, SDR0_ETH_CFG_ECS << dev->cell_index);
378 } else {
379
380 dcri_clrset(SDR0, SDR0_ETH_CFG,
381 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
382 }
383 }
384#endif
385
386 out_be32(&p->mr0, EMAC_MR0_SRST);
387 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
388 --n;
389
390#ifdef CONFIG_PPC_DCR_NATIVE
391 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
392 if (!n && !try_internal_clock) {
393
394 n = 20;
395 try_internal_clock = true;
396 goto do_retry;
397 }
398
399 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
400 dev->phy_map == 0xffffffff)) {
401
402 dcri_clrset(SDR0, SDR0_ETH_CFG,
403 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
404 }
405 }
406#endif
407
408 if (n) {
409 dev->reset_failed = 0;
410 return 0;
411 } else {
412 emac_report_timeout_error(dev, "reset timeout");
413 dev->reset_failed = 1;
414 return -ETIMEDOUT;
415 }
416}
417
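/* Program the group address hash table (GAHT) from the device's multicast
 * list: hash each address with ether_crc() and set the corresponding slot
 * bit in the per-chip set of GAHT registers.
 */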
418static void emac_hash_mc(struct emac_instance *dev)
419{
420 const int regs = EMAC_XAHT_REGS(dev);
421 u32 *gaht_base = emac_gaht_base(dev);
422 u32 gaht_temp[EMAC_XAHT_MAX_REGS];
423 struct netdev_hw_addr *ha;
424 int i;
425
426 DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
427
428 memset(gaht_temp, 0, sizeof (gaht_temp));
429
430 netdev_for_each_mc_addr(ha, dev->ndev) {
431 int slot, reg, mask;
432 DBG2(dev, "mc %pM" NL, ha->addr);
433
434 slot = EMAC_XAHT_CRC_TO_SLOT(dev,
435 ether_crc(ETH_ALEN, ha->addr));
436 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
437 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
438
439 gaht_temp[reg] |= mask;
440 }
441
442 for (i = 0; i < regs; i++)
443 out_be32(gaht_base + i, gaht_temp[i]);
444}
445
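/* Build the RX mode register (RMR) value from the interface flags:
 * promiscuous mode, all-multi / too many multicast entries, or plain
 * multicast hash matching, plus the jumbo frame size on APM821xx.
 */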
446static inline u32 emac_iff2rmr(struct net_device *ndev)
447{
448 struct emac_instance *dev = netdev_priv(ndev);
449 u32 r;
450
451 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
452
453 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
454 r |= EMAC4_RMR_BASE;
455 else
456 r |= EMAC_RMR_BASE;
457
458 if (ndev->flags & IFF_PROMISC)
459 r |= EMAC_RMR_PME;
460 else if (ndev->flags & IFF_ALLMULTI ||
461 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
462 r |= EMAC_RMR_PMME;
463 else if (!netdev_mc_empty(ndev))
464 r |= EMAC_RMR_MAE;
465
466 if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
467 r &= ~EMAC4_RMR_MJS_MASK;
468 r |= EMAC4_RMR_MJS(ndev->mtu);
469 }
470
471 return r;
472}
473
474static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
475{
476 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
477
478 DBG2(dev, "__emac_calc_base_mr1" NL);
479
480 switch(tx_size) {
481 case 2048:
482 ret |= EMAC_MR1_TFS_2K;
483 break;
484 default:
485 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
486 dev->ndev->name, tx_size);
487 }
488
489 switch(rx_size) {
490 case 16384:
491 ret |= EMAC_MR1_RFS_16K;
492 break;
493 case 4096:
494 ret |= EMAC_MR1_RFS_4K;
495 break;
496 default:
497 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
498 dev->ndev->name, rx_size);
499 }
500
501 return ret;
502}
503
504static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
505{
506 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
507 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
508
509 DBG2(dev, "__emac4_calc_base_mr1" NL);
510
511 switch(tx_size) {
512 case 16384:
513 ret |= EMAC4_MR1_TFS_16K;
514 break;
515 case 8192:
516 ret |= EMAC4_MR1_TFS_8K;
517 break;
518 case 4096:
519 ret |= EMAC4_MR1_TFS_4K;
520 break;
521 case 2048:
522 ret |= EMAC4_MR1_TFS_2K;
523 break;
524 default:
525 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
526 dev->ndev->name, tx_size);
527 }
528
529 switch(rx_size) {
530 case 16384:
531 ret |= EMAC4_MR1_RFS_16K;
532 break;
533 case 8192:
534 ret |= EMAC4_MR1_RFS_8K;
535 break;
536 case 4096:
537 ret |= EMAC4_MR1_RFS_4K;
538 break;
539 case 2048:
540 ret |= EMAC4_MR1_RFS_2K;
541 break;
542 default:
543 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
544 dev->ndev->name, rx_size);
545 }
546
547 return ret;
548}
549
550static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
551{
552 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
553 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
554 __emac_calc_base_mr1(dev, tx_size, rx_size);
555}
556
557static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
558{
559 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
560 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
561 else
562 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
563}
564
565static inline u32 emac_calc_rwmr(struct emac_instance *dev,
566 unsigned int low, unsigned int high)
567{
568 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
569 return (low << 22) | ( (high & 0x3ff) << 6);
570 else
571 return (low << 23) | ( (high & 0x1ff) << 7);
572}
573
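/* (Re)program the whole MAC from the current PHY state: mode register 1
 * (duplex, speed, FIFO sizes, flow control), MAC address, VLAN TPID, RX
 * mode, TX request/threshold and RX watermark registers, and the interrupt
 * enable mask.
 */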
574static int emac_configure(struct emac_instance *dev)
575{
576 struct emac_regs __iomem *p = dev->emacp;
577 struct net_device *ndev = dev->ndev;
578 int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
579 u32 r, mr1 = 0;
580
581 DBG(dev, "configure" NL);
582
583 if (!link) {
584 out_be32(&p->mr1, in_be32(&p->mr1)
585 | EMAC_MR1_FDE | EMAC_MR1_ILE);
586 udelay(100);
587 } else if (emac_reset(dev) < 0)
588 return -ETIMEDOUT;
589
590 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
591 tah_reset(dev->tah_dev);
592
593 DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
594 link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
595
596
597 tx_size = dev->tx_fifo_size;
598 rx_size = dev->rx_fifo_size;
599
600
601 if (!link)
602 mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
603
604
605 else if (dev->phy.duplex == DUPLEX_FULL)
606 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
607
608
609 dev->stop_timeout = STOP_TIMEOUT_10;
610 switch (dev->phy.speed) {
611 case SPEED_1000:
612 if (emac_phy_gpcs(dev->phy.mode)) {
613 mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
614 (dev->phy.gpcs_address != 0xffffffff) ?
615 dev->phy.gpcs_address : dev->phy.address);
616
617
618
619
620 out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
621 } else
622 mr1 |= EMAC_MR1_MF_1000;
623
624
625 tx_size = dev->tx_fifo_size_gige;
626 rx_size = dev->rx_fifo_size_gige;
627
628 if (dev->ndev->mtu > ETH_DATA_LEN) {
629 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
630 mr1 |= EMAC4_MR1_JPSM;
631 else
632 mr1 |= EMAC_MR1_JPSM;
633 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
634 } else
635 dev->stop_timeout = STOP_TIMEOUT_1000;
636 break;
637 case SPEED_100:
638 mr1 |= EMAC_MR1_MF_100;
639 dev->stop_timeout = STOP_TIMEOUT_100;
640 break;
641 default:
642 break;
643 }
644
645 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
646 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
647 dev->phy.speed);
648 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
649 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
650
651
652
653
654 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
655 dev->phy.duplex == DUPLEX_FULL) {
656 if (dev->phy.pause)
657 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
658 else if (dev->phy.asym_pause)
659 mr1 |= EMAC_MR1_APP;
660 }
661
662
663 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
664 out_be32(&p->mr1, mr1);
665
666
667 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
668 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
669 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
670 ndev->dev_addr[5]);
671
672
673 out_be32(&p->vtpid, 0x8100);
674
675
676 r = emac_iff2rmr(ndev);
677 if (r & EMAC_RMR_MAE)
678 emac_hash_mc(dev);
679 out_be32(&p->rmr, r);
680
681
682 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
683 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
684 tx_size / 2 / dev->fifo_entry_size);
685 else
686 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
687 tx_size / 2 / dev->fifo_entry_size);
688 out_be32(&p->tmr1, r);
689 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
711 rx_size / 4 / dev->fifo_entry_size);
712 out_be32(&p->rwmr, r);
713
714
715 out_be32(&p->ptr, 0xffff);
716
717
718 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
719 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
720 EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE;
	out_be32(&p->iser, r);
725
726
727 if (emac_phy_gpcs(dev->phy.mode)) {
728 if (dev->phy.gpcs_address != 0xffffffff)
729 emac_mii_reset_gpcs(&dev->phy);
730 else
731 emac_mii_reset_phy(&dev->phy);
732 }
733
734 return 0;
735}
736
737static void emac_reinitialize(struct emac_instance *dev)
738{
739 DBG(dev, "reinitialize" NL);
740
741 emac_netif_stop(dev);
742 if (!emac_configure(dev)) {
743 emac_tx_enable(dev);
744 emac_rx_enable(dev);
745 }
746 emac_netif_start(dev);
747}
748
749static void emac_full_tx_reset(struct emac_instance *dev)
750{
751 DBG(dev, "full_tx_reset" NL);
752
753 emac_tx_disable(dev);
754 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
755 emac_clean_tx_ring(dev);
756 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
757
758 emac_configure(dev);
759
760 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
761 emac_tx_enable(dev);
762 emac_rx_enable(dev);
763}
764
765static void emac_reset_work(struct work_struct *work)
766{
767 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
768
769 DBG(dev, "reset_work" NL);
770
771 mutex_lock(&dev->link_lock);
772 if (dev->opened) {
773 emac_netif_stop(dev);
774 emac_full_tx_reset(dev);
775 emac_netif_start(dev);
776 }
777 mutex_unlock(&dev->link_lock);
778}
779
780static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
781{
782 struct emac_instance *dev = netdev_priv(ndev);
783
784 DBG(dev, "tx_timeout" NL);
785
786 schedule_work(&dev->reset_work);
787}
788
789
790static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
791{
792 int done = !!(stacr & EMAC_STACR_OC);
793
794 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
795 done = !done;
796
797 return done;
798};
799
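/* Read a PHY register through the EMAC STA controller (STACR): wait for
 * any previous command to finish, issue the read, poll for completion and
 * return the data, or a negative errno on timeout / PHY error.
 */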
800static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
801{
802 struct emac_regs __iomem *p = dev->emacp;
803 u32 r = 0;
804 int n, err = -ETIMEDOUT;
805
806 mutex_lock(&dev->mdio_lock);
807
808 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
809
810
811 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
812 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
813 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
814 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
815
816
817 n = 20;
818 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
819 udelay(1);
820 if (!--n) {
821 DBG2(dev, " -> timeout wait idle\n");
822 goto bail;
823 }
824 }
825
826
827 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
828 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
829 else
830 r = EMAC_STACR_BASE(dev->opb_bus_freq);
831 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
832 r |= EMAC_STACR_OC;
833 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
834 r |= EMACX_STACR_STAC_READ;
835 else
836 r |= EMAC_STACR_STAC_READ;
837 r |= (reg & EMAC_STACR_PRA_MASK)
838 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
839 out_be32(&p->stacr, r);
840
841
842 n = 200;
843 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
844 udelay(1);
845 if (!--n) {
846 DBG2(dev, " -> timeout wait complete\n");
847 goto bail;
848 }
849 }
850
851 if (unlikely(r & EMAC_STACR_PHYE)) {
852 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
853 err = -EREMOTEIO;
854 goto bail;
855 }
856
857 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
858
859 DBG2(dev, "mdio_read -> %04x" NL, r);
860 err = 0;
861 bail:
862 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
863 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
864 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
865 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
866 mutex_unlock(&dev->mdio_lock);
867
868 return err == 0 ? r : err;
869}
870
871static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
872 u16 val)
873{
874 struct emac_regs __iomem *p = dev->emacp;
875 u32 r = 0;
876 int n;
877
878 mutex_lock(&dev->mdio_lock);
879
880 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
881
882
883 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
884 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
885 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
886 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
887
888
889 n = 20;
890 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
891 udelay(1);
892 if (!--n) {
893 DBG2(dev, " -> timeout wait idle\n");
894 goto bail;
895 }
896 }
897
898
899 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
900 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
901 else
902 r = EMAC_STACR_BASE(dev->opb_bus_freq);
903 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
904 r |= EMAC_STACR_OC;
905 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
906 r |= EMACX_STACR_STAC_WRITE;
907 else
908 r |= EMAC_STACR_STAC_WRITE;
909 r |= (reg & EMAC_STACR_PRA_MASK) |
910 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
911 (val << EMAC_STACR_PHYD_SHIFT);
912 out_be32(&p->stacr, r);
913
914
915 n = 200;
916 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
917 udelay(1);
918 if (!--n) {
919 DBG2(dev, " -> timeout wait complete\n");
920 goto bail;
921 }
922 }
923 bail:
924 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
925 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
926 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
927 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
928 mutex_unlock(&dev->mdio_lock);
929}
930
931static int emac_mdio_read(struct net_device *ndev, int id, int reg)
932{
933 struct emac_instance *dev = netdev_priv(ndev);
934 int res;
935
936 res = __emac_mdio_read((dev->mdio_instance &&
937 dev->phy.gpcs_address != id) ?
938 dev->mdio_instance : dev,
939 (u8) id, (u8) reg);
940 return res;
941}
942
943static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
944{
945 struct emac_instance *dev = netdev_priv(ndev);
946
947 __emac_mdio_write((dev->mdio_instance &&
948 dev->phy.gpcs_address != id) ?
949 dev->mdio_instance : dev,
950 (u8) id, (u8) reg, (u16) val);
951}
952
953
954static void __emac_set_multicast_list(struct emac_instance *dev)
955{
956 struct emac_regs __iomem *p = dev->emacp;
957 u32 rmr = emac_iff2rmr(dev->ndev);
958
959 DBG(dev, "__multicast %08x" NL, rmr);
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978 dev->mcast_pending = 0;
979 emac_rx_disable(dev);
980 if (rmr & EMAC_RMR_MAE)
981 emac_hash_mc(dev);
982 out_be32(&p->rmr, rmr);
983 emac_rx_enable(dev);
984}
985
986
987static void emac_set_multicast_list(struct net_device *ndev)
988{
989 struct emac_instance *dev = netdev_priv(ndev);
990
991 DBG(dev, "multicast" NL);
992
993 BUG_ON(!netif_running(dev->ndev));
994
995 if (dev->no_mcast) {
996 dev->mcast_pending = 1;
997 return;
998 }
999
1000 mutex_lock(&dev->link_lock);
1001 __emac_set_multicast_list(dev);
1002 mutex_unlock(&dev->link_lock);
1003}
1004
1005static int emac_set_mac_address(struct net_device *ndev, void *sa)
1006{
1007 struct emac_instance *dev = netdev_priv(ndev);
1008 struct sockaddr *addr = sa;
1009 struct emac_regs __iomem *p = dev->emacp;
1010
1011 if (!is_valid_ether_addr(addr->sa_data))
1012 return -EADDRNOTAVAIL;
1013
1014 mutex_lock(&dev->link_lock);
1015
1016 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
1017
1018 emac_rx_disable(dev);
1019 emac_tx_disable(dev);
1020 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
1021 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
1022 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
1023 ndev->dev_addr[5]);
1024 emac_tx_enable(dev);
1025 emac_rx_enable(dev);
1026
1027 mutex_unlock(&dev->link_lock);
1028
1029 return 0;
1030}
1031
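/* Re-create the RX ring for a new MTU: stop the channel, drop any partial
 * scatter/gather packet, reinitialize the descriptors and, if the new skb
 * size is larger, reallocate the RX buffers before restarting RX.
 */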
1032static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
1033{
1034 int rx_sync_size = emac_rx_sync_size(new_mtu);
1035 int rx_skb_size = emac_rx_skb_size(new_mtu);
1036 int i, ret = 0;
1037 int mr1_jumbo_bit_change = 0;
1038
1039 mutex_lock(&dev->link_lock);
1040 emac_netif_stop(dev);
1041 emac_rx_disable(dev);
1042 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1043
1044 if (dev->rx_sg_skb) {
1045 ++dev->estats.rx_dropped_resize;
1046 dev_kfree_skb(dev->rx_sg_skb);
1047 dev->rx_sg_skb = NULL;
1048 }
1049
1050
1051
1052
1053
1054 for (i = 0; i < NUM_RX_BUFF; ++i) {
1055 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
1056 ++dev->estats.rx_dropped_resize;
1057
1058 dev->rx_desc[i].data_len = 0;
1059 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
1060 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1061 }
1062
1063
1064 if (rx_skb_size <= dev->rx_skb_size)
1065 goto skip;
1066
1067
1068 for (i = 0; i < NUM_RX_BUFF; ++i) {
1069 struct sk_buff *skb;
1070
1071 skb = netdev_alloc_skb_ip_align(dev->ndev, rx_skb_size);
1072 if (!skb) {
1073 ret = -ENOMEM;
1074 goto oom;
1075 }
1076
1077 BUG_ON(!dev->rx_skb[i]);
1078 dev_kfree_skb(dev->rx_skb[i]);
1079
1080 dev->rx_desc[i].data_ptr =
1081 dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
1082 rx_sync_size, DMA_FROM_DEVICE)
1083 + NET_IP_ALIGN;
1084 dev->rx_skb[i] = skb;
1085 }
1086 skip:
1087
1088 if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
1089 mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
1090 (dev->ndev->mtu > ETH_DATA_LEN);
1091 } else {
1092 mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
1093 (dev->ndev->mtu > ETH_DATA_LEN);
1094 }
1095
1096 if (mr1_jumbo_bit_change) {
1097
1098 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1099
1100 dev->ndev->mtu = new_mtu;
1101 emac_full_tx_reset(dev);
1102 }
1103
1104 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
1105 oom:
1106
1107 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1108 dev->rx_slot = 0;
1109 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1110 emac_rx_enable(dev);
1111 emac_netif_start(dev);
1112 mutex_unlock(&dev->link_lock);
1113
1114 return ret;
1115}
1116
1117
1118static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1119{
1120 struct emac_instance *dev = netdev_priv(ndev);
1121 int ret = 0;
1122
1123 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1124
1125 if (netif_running(ndev)) {
1126
1127 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1128 ret = emac_resize_rx_ring(dev, new_mtu);
1129 }
1130
1131 if (!ret) {
1132 ndev->mtu = new_mtu;
1133 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1134 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1135 }
1136
1137 return ret;
1138}
1139
1140static void emac_clean_tx_ring(struct emac_instance *dev)
1141{
1142 int i;
1143
1144 for (i = 0; i < NUM_TX_BUFF; ++i) {
1145 if (dev->tx_skb[i]) {
1146 dev_kfree_skb(dev->tx_skb[i]);
1147 dev->tx_skb[i] = NULL;
1148 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1149 ++dev->estats.tx_dropped;
1150 }
1151 dev->tx_desc[i].ctrl = 0;
1152 dev->tx_desc[i].data_ptr = 0;
1153 }
1154}
1155
1156static void emac_clean_rx_ring(struct emac_instance *dev)
1157{
1158 int i;
1159
1160 for (i = 0; i < NUM_RX_BUFF; ++i)
1161 if (dev->rx_skb[i]) {
1162 dev->rx_desc[i].ctrl = 0;
1163 dev_kfree_skb(dev->rx_skb[i]);
1164 dev->rx_skb[i] = NULL;
1165 dev->rx_desc[i].data_ptr = 0;
1166 }
1167
1168 if (dev->rx_sg_skb) {
1169 dev_kfree_skb(dev->rx_sg_skb);
1170 dev->rx_sg_skb = NULL;
1171 }
1172}
1173
1174static int
1175__emac_prepare_rx_skb(struct sk_buff *skb, struct emac_instance *dev, int slot)
1176{
1177 if (unlikely(!skb))
1178 return -ENOMEM;
1179
1180 dev->rx_skb[slot] = skb;
1181 dev->rx_desc[slot].data_len = 0;
1182
1183 dev->rx_desc[slot].data_ptr =
1184 dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
1185 dev->rx_sync_size, DMA_FROM_DEVICE) + NET_IP_ALIGN;
1186 wmb();
1187 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1188 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1189
1190 return 0;
1191}
1192
1193static int
1194emac_alloc_rx_skb(struct emac_instance *dev, int slot)
1195{
1196 struct sk_buff *skb;
1197
1198 skb = __netdev_alloc_skb_ip_align(dev->ndev, dev->rx_skb_size,
1199 GFP_KERNEL);
1200
1201 return __emac_prepare_rx_skb(skb, dev, slot);
1202}
1203
1204static int
1205emac_alloc_rx_skb_napi(struct emac_instance *dev, int slot)
1206{
1207 struct sk_buff *skb;
1208
1209 skb = napi_alloc_skb(&dev->mal->napi, dev->rx_skb_size);
1210
1211 return __emac_prepare_rx_skb(skb, dev, slot);
1212}
1213
1214static void emac_print_link_status(struct emac_instance *dev)
1215{
1216 if (netif_carrier_ok(dev->ndev))
1217 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1218 dev->ndev->name, dev->phy.speed,
1219 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1220 dev->phy.pause ? ", pause enabled" :
1221 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1222 else
1223 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1224}
1225
1226
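/* ndo_open: request the EMAC IRQ, allocate the RX ring, start PHY link
 * polling (or assume link up if there is no PHY), enable the MAL channels
 * and bring the interface up.
 */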
1227static int emac_open(struct net_device *ndev)
1228{
1229 struct emac_instance *dev = netdev_priv(ndev);
1230 int err, i;
1231
1232 DBG(dev, "open" NL);
1233
1234
1235 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1236 if (err) {
1237 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1238 ndev->name, dev->emac_irq);
1239 return err;
1240 }
1241
1242
1243 for (i = 0; i < NUM_RX_BUFF; ++i)
1244 if (emac_alloc_rx_skb(dev, i)) {
1245 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1246 ndev->name);
1247 goto oom;
1248 }
1249
1250 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1251 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1252 dev->rx_sg_skb = NULL;
1253
1254 mutex_lock(&dev->link_lock);
1255 dev->opened = 1;
1256
1257
1258
1259 if (dev->phy.address >= 0) {
1260 int link_poll_interval;
1261 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1262 dev->phy.def->ops->read_link(&dev->phy);
1263 emac_rx_clk_default(dev);
1264 netif_carrier_on(dev->ndev);
1265 link_poll_interval = PHY_POLL_LINK_ON;
1266 } else {
1267 emac_rx_clk_tx(dev);
1268 netif_carrier_off(dev->ndev);
1269 link_poll_interval = PHY_POLL_LINK_OFF;
1270 }
1271 dev->link_polling = 1;
1272 wmb();
1273 schedule_delayed_work(&dev->link_work, link_poll_interval);
1274 emac_print_link_status(dev);
1275 } else
1276 netif_carrier_on(dev->ndev);
1277
1278
1279 dev_mc_add_global(ndev, default_mcast_addr);
1280
1281 emac_configure(dev);
1282 mal_poll_add(dev->mal, &dev->commac);
1283 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1284 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1285 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1286 emac_tx_enable(dev);
1287 emac_rx_enable(dev);
1288 emac_netif_start(dev);
1289
1290 mutex_unlock(&dev->link_lock);
1291
1292 return 0;
1293 oom:
1294 emac_clean_rx_ring(dev);
1295 free_irq(dev->emac_irq, dev);
1296
1297 return -ENOMEM;
1298}
1299
1300
1301#if 0
1302static int emac_link_differs(struct emac_instance *dev)
1303{
1304 u32 r = in_be32(&dev->emacp->mr1);
1305
1306 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1307 int speed, pause, asym_pause;
1308
1309 if (r & EMAC_MR1_MF_1000)
1310 speed = SPEED_1000;
1311 else if (r & EMAC_MR1_MF_100)
1312 speed = SPEED_100;
1313 else
1314 speed = SPEED_10;
1315
1316 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1317 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1318 pause = 1;
1319 asym_pause = 0;
1320 break;
1321 case EMAC_MR1_APP:
1322 pause = 0;
1323 asym_pause = 1;
1324 break;
1325 default:
1326 pause = asym_pause = 0;
1327 }
1328 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1329 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
1330}
1331#endif
1332
1333static void emac_link_timer(struct work_struct *work)
1334{
1335 struct emac_instance *dev =
1336 container_of(to_delayed_work(work),
1337 struct emac_instance, link_work);
1338 int link_poll_interval;
1339
1340 mutex_lock(&dev->link_lock);
1341 DBG2(dev, "link timer" NL);
1342
1343 if (!dev->opened)
1344 goto bail;
1345
1346 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1347 if (!netif_carrier_ok(dev->ndev)) {
1348 emac_rx_clk_default(dev);
1349
1350 dev->phy.def->ops->read_link(&dev->phy);
1351
1352 netif_carrier_on(dev->ndev);
1353 emac_netif_stop(dev);
1354 emac_full_tx_reset(dev);
1355 emac_netif_start(dev);
1356 emac_print_link_status(dev);
1357 }
1358 link_poll_interval = PHY_POLL_LINK_ON;
1359 } else {
1360 if (netif_carrier_ok(dev->ndev)) {
1361 emac_rx_clk_tx(dev);
1362 netif_carrier_off(dev->ndev);
1363 netif_tx_disable(dev->ndev);
1364 emac_reinitialize(dev);
1365 emac_print_link_status(dev);
1366 }
1367 link_poll_interval = PHY_POLL_LINK_OFF;
1368 }
1369 schedule_delayed_work(&dev->link_work, link_poll_interval);
1370 bail:
1371 mutex_unlock(&dev->link_lock);
1372}
1373
1374static void emac_force_link_update(struct emac_instance *dev)
1375{
1376 netif_carrier_off(dev->ndev);
1377 smp_rmb();
1378 if (dev->link_polling) {
1379 cancel_delayed_work_sync(&dev->link_work);
1380 if (dev->link_polling)
1381 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1382 }
1383}
1384
1385
1386static int emac_close(struct net_device *ndev)
1387{
1388 struct emac_instance *dev = netdev_priv(ndev);
1389
1390 DBG(dev, "close" NL);
1391
1392 if (dev->phy.address >= 0) {
1393 dev->link_polling = 0;
1394 cancel_delayed_work_sync(&dev->link_work);
1395 }
1396 mutex_lock(&dev->link_lock);
1397 emac_netif_stop(dev);
1398 dev->opened = 0;
1399 mutex_unlock(&dev->link_lock);
1400
1401 emac_rx_disable(dev);
1402 emac_tx_disable(dev);
1403 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1404 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1405 mal_poll_del(dev->mal, &dev->commac);
1406
1407 emac_clean_tx_ring(dev);
1408 emac_clean_rx_ring(dev);
1409
1410 free_irq(dev->emac_irq, dev);
1411
1412 netif_carrier_off(ndev);
1413
1414 return 0;
1415}
1416
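/* Request TX checksum insertion from the TAH when the hardware has one and
 * the skb asked for CHECKSUM_PARTIAL offload.
 */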
1417static inline u16 emac_tx_csum(struct emac_instance *dev,
1418 struct sk_buff *skb)
1419{
1420 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1421 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1422 ++dev->stats.tx_packets_csum;
1423 return EMAC_TX_CTRL_TAH_CSUM;
1424 }
1425 return 0;
1426}
1427
1428static inline netdev_tx_t emac_xmit_finish(struct emac_instance *dev, int len)
1429{
1430 struct emac_regs __iomem *p = dev->emacp;
1431 struct net_device *ndev = dev->ndev;
1432
1433
1434
1435
1436
1437 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1438 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1439 else
1440 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1441
1442 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1443 netif_stop_queue(ndev);
1444 DBG2(dev, "stopped TX queue" NL);
1445 }
1446
1447 netif_trans_update(ndev);
1448 ++dev->stats.tx_packets;
1449 dev->stats.tx_bytes += len;
1450
1451 return NETDEV_TX_OK;
1452}
1453
1454
1455static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1456{
1457 struct emac_instance *dev = netdev_priv(ndev);
1458 unsigned int len = skb->len;
1459 int slot;
1460
1461 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1462 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1463
1464 slot = dev->tx_slot++;
1465 if (dev->tx_slot == NUM_TX_BUFF) {
1466 dev->tx_slot = 0;
1467 ctrl |= MAL_TX_CTRL_WRAP;
1468 }
1469
1470 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1471
1472 dev->tx_skb[slot] = skb;
1473 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1474 skb->data, len,
1475 DMA_TO_DEVICE);
1476 dev->tx_desc[slot].data_len = (u16) len;
1477 wmb();
1478 dev->tx_desc[slot].ctrl = ctrl;
1479
1480 return emac_xmit_finish(dev, len);
1481}
1482
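/* Fill additional TX descriptors for a buffer larger than MAL_MAX_TX_SIZE,
 * chunk by chunk, marking the last chunk of the last buffer with
 * MAL_TX_CTRL_LAST.  Returns the last slot used.
 */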
1483static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1484 u32 pd, int len, int last, u16 base_ctrl)
1485{
1486 while (1) {
1487 u16 ctrl = base_ctrl;
1488 int chunk = min(len, MAL_MAX_TX_SIZE);
1489 len -= chunk;
1490
1491 slot = (slot + 1) % NUM_TX_BUFF;
1492
1493 if (last && !len)
1494 ctrl |= MAL_TX_CTRL_LAST;
1495 if (slot == NUM_TX_BUFF - 1)
1496 ctrl |= MAL_TX_CTRL_WRAP;
1497
1498 dev->tx_skb[slot] = NULL;
1499 dev->tx_desc[slot].data_ptr = pd;
1500 dev->tx_desc[slot].data_len = (u16) chunk;
1501 dev->tx_desc[slot].ctrl = ctrl;
1502 ++dev->tx_cnt;
1503
1504 if (!len)
1505 break;
1506
1507 pd += chunk;
1508 }
1509 return slot;
1510}
1511
1512
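/* TX path used when scatter/gather is available: map the linear part and
 * each fragment, splitting anything bigger than MAL_MAX_TX_SIZE, and only
 * hand the first descriptor to the hardware once the whole chain is set
 * up.  Falls back to emac_start_xmit() for small linear skbs.
 */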
1513static netdev_tx_t
1514emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1515{
1516 struct emac_instance *dev = netdev_priv(ndev);
1517 int nr_frags = skb_shinfo(skb)->nr_frags;
1518 int len = skb->len, chunk;
1519 int slot, i;
1520 u16 ctrl;
1521 u32 pd;
1522
1523
1524 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1525 return emac_start_xmit(skb, ndev);
1526
1527 len -= skb->data_len;
1528
1529
1530
1531
1532
1533 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1534 goto stop_queue;
1535
1536 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1537 emac_tx_csum(dev, skb);
1538 slot = dev->tx_slot;
1539
1540
1541 dev->tx_skb[slot] = NULL;
1542 chunk = min(len, MAL_MAX_TX_SIZE);
1543 dev->tx_desc[slot].data_ptr = pd =
1544 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1545 dev->tx_desc[slot].data_len = (u16) chunk;
1546 len -= chunk;
1547 if (unlikely(len))
1548 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1549 ctrl);
1550
1551 for (i = 0; i < nr_frags; ++i) {
1552 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1553 len = skb_frag_size(frag);
1554
1555 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1556 goto undo_frame;
1557
1558 pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
1559 DMA_TO_DEVICE);
1560
1561 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1562 ctrl);
1563 }
1564
1565 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1566
1567
1568 dev->tx_skb[slot] = skb;
1569
1570
1571 if (dev->tx_slot == NUM_TX_BUFF - 1)
1572 ctrl |= MAL_TX_CTRL_WRAP;
1573 wmb();
1574 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1575 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1576
1577 return emac_xmit_finish(dev, skb->len);
1578
1579 undo_frame:
1580
1581
1582
1583 while (slot != dev->tx_slot) {
1584 dev->tx_desc[slot].ctrl = 0;
1585 --dev->tx_cnt;
1586 if (--slot < 0)
1587 slot = NUM_TX_BUFF - 1;
1588 }
1589 ++dev->estats.tx_undo;
1590
1591 stop_queue:
1592 netif_stop_queue(ndev);
1593 DBG2(dev, "stopped TX queue" NL);
1594 return NETDEV_TX_BUSY;
1595}
1596
1597
1598static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1599{
1600 struct emac_error_stats *st = &dev->estats;
1601
1602 DBG(dev, "BD TX error %04x" NL, ctrl);
1603
1604 ++st->tx_bd_errors;
1605 if (ctrl & EMAC_TX_ST_BFCS)
1606 ++st->tx_bd_bad_fcs;
1607 if (ctrl & EMAC_TX_ST_LCS)
1608 ++st->tx_bd_carrier_loss;
1609 if (ctrl & EMAC_TX_ST_ED)
1610 ++st->tx_bd_excessive_deferral;
1611 if (ctrl & EMAC_TX_ST_EC)
1612 ++st->tx_bd_excessive_collisions;
1613 if (ctrl & EMAC_TX_ST_LC)
1614 ++st->tx_bd_late_collision;
1615 if (ctrl & EMAC_TX_ST_MC)
1616 ++st->tx_bd_multple_collisions;
1617 if (ctrl & EMAC_TX_ST_SC)
1618 ++st->tx_bd_single_collision;
1619 if (ctrl & EMAC_TX_ST_UR)
1620 ++st->tx_bd_underrun;
1621 if (ctrl & EMAC_TX_ST_SQE)
1622 ++st->tx_bd_sqe;
1623}
1624
1625static void emac_poll_tx(void *param)
1626{
1627 struct emac_instance *dev = param;
1628 u32 bad_mask;
1629
1630 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1631
1632 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1633 bad_mask = EMAC_IS_BAD_TX_TAH;
1634 else
1635 bad_mask = EMAC_IS_BAD_TX;
1636
1637 netif_tx_lock_bh(dev->ndev);
1638 if (dev->tx_cnt) {
1639 u16 ctrl;
1640 int slot = dev->ack_slot, n = 0;
1641 again:
1642 ctrl = dev->tx_desc[slot].ctrl;
1643 if (!(ctrl & MAL_TX_CTRL_READY)) {
1644 struct sk_buff *skb = dev->tx_skb[slot];
1645 ++n;
1646
1647 if (skb) {
1648 dev_kfree_skb(skb);
1649 dev->tx_skb[slot] = NULL;
1650 }
1651 slot = (slot + 1) % NUM_TX_BUFF;
1652
1653 if (unlikely(ctrl & bad_mask))
1654 emac_parse_tx_error(dev, ctrl);
1655
1656 if (--dev->tx_cnt)
1657 goto again;
1658 }
1659 if (n) {
1660 dev->ack_slot = slot;
1661 if (netif_queue_stopped(dev->ndev) &&
1662 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1663 netif_wake_queue(dev->ndev);
1664
1665 DBG2(dev, "tx %d pkts" NL, n);
1666 }
1667 }
1668 netif_tx_unlock_bh(dev->ndev);
1669}
1670
1671static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1672 int len)
1673{
1674 struct sk_buff *skb = dev->rx_skb[slot];
1675
1676 DBG2(dev, "recycle %d %d" NL, slot, len);
1677
1678 if (len)
1679 dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
1680 SKB_DATA_ALIGN(len + NET_IP_ALIGN),
1681 DMA_FROM_DEVICE);
1682
1683 dev->rx_desc[slot].data_len = 0;
1684 wmb();
1685 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1686 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1687}
1688
1689static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1690{
1691 struct emac_error_stats *st = &dev->estats;
1692
1693 DBG(dev, "BD RX error %04x" NL, ctrl);
1694
1695 ++st->rx_bd_errors;
1696 if (ctrl & EMAC_RX_ST_OE)
1697 ++st->rx_bd_overrun;
1698 if (ctrl & EMAC_RX_ST_BP)
1699 ++st->rx_bd_bad_packet;
1700 if (ctrl & EMAC_RX_ST_RP)
1701 ++st->rx_bd_runt_packet;
1702 if (ctrl & EMAC_RX_ST_SE)
1703 ++st->rx_bd_short_event;
1704 if (ctrl & EMAC_RX_ST_AE)
1705 ++st->rx_bd_alignment_error;
1706 if (ctrl & EMAC_RX_ST_BFCS)
1707 ++st->rx_bd_bad_fcs;
1708 if (ctrl & EMAC_RX_ST_PTL)
1709 ++st->rx_bd_packet_too_long;
1710 if (ctrl & EMAC_RX_ST_ORE)
1711 ++st->rx_bd_out_of_range;
1712 if (ctrl & EMAC_RX_ST_IRE)
1713 ++st->rx_bd_in_range;
1714}
1715
1716static inline void emac_rx_csum(struct emac_instance *dev,
1717 struct sk_buff *skb, u16 ctrl)
1718{
1719#ifdef CONFIG_IBM_EMAC_TAH
1720 if (!ctrl && dev->tah_dev) {
1721 skb->ip_summed = CHECKSUM_UNNECESSARY;
1722 ++dev->stats.rx_packets_csum;
1723 }
1724#endif
1725}
1726
1727static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1728{
1729 if (likely(dev->rx_sg_skb != NULL)) {
1730 int len = dev->rx_desc[slot].data_len;
1731 int tot_len = dev->rx_sg_skb->len + len;
1732
1733 if (unlikely(tot_len + NET_IP_ALIGN > dev->rx_skb_size)) {
1734 ++dev->estats.rx_dropped_mtu;
1735 dev_kfree_skb(dev->rx_sg_skb);
1736 dev->rx_sg_skb = NULL;
1737 } else {
1738 memcpy(skb_tail_pointer(dev->rx_sg_skb),
1739 dev->rx_skb[slot]->data, len);
1740 skb_put(dev->rx_sg_skb, len);
1741 emac_recycle_rx_skb(dev, slot, len);
1742 return 0;
1743 }
1744 }
1745 emac_recycle_rx_skb(dev, slot, 0);
1746 return -1;
1747}
1748
1749
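/* NAPI RX poll callback (via the MAL): process up to @budget received
 * descriptors, copying small packets (below EMAC_RX_COPY_THRESH),
 * reassembling scatter/gather packets and refilling the ring as it goes.
 */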
1750static int emac_poll_rx(void *param, int budget)
1751{
1752 struct emac_instance *dev = param;
1753 int slot = dev->rx_slot, received = 0;
1754
1755 DBG2(dev, "poll_rx(%d)" NL, budget);
1756
1757 again:
1758 while (budget > 0) {
1759 int len;
1760 struct sk_buff *skb;
1761 u16 ctrl = dev->rx_desc[slot].ctrl;
1762
1763 if (ctrl & MAL_RX_CTRL_EMPTY)
1764 break;
1765
1766 skb = dev->rx_skb[slot];
1767 mb();
1768 len = dev->rx_desc[slot].data_len;
1769
1770 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1771 goto sg;
1772
1773 ctrl &= EMAC_BAD_RX_MASK;
1774 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1775 emac_parse_rx_error(dev, ctrl);
1776 ++dev->estats.rx_dropped_error;
1777 emac_recycle_rx_skb(dev, slot, 0);
1778 len = 0;
1779 goto next;
1780 }
1781
1782 if (len < ETH_HLEN) {
1783 ++dev->estats.rx_dropped_stack;
1784 emac_recycle_rx_skb(dev, slot, len);
1785 goto next;
1786 }
1787
1788 if (len && len < EMAC_RX_COPY_THRESH) {
1789 struct sk_buff *copy_skb;
1790
1791 copy_skb = napi_alloc_skb(&dev->mal->napi, len);
1792 if (unlikely(!copy_skb))
1793 goto oom;
1794
1795 memcpy(copy_skb->data - NET_IP_ALIGN,
1796 skb->data - NET_IP_ALIGN,
1797 len + NET_IP_ALIGN);
1798 emac_recycle_rx_skb(dev, slot, len);
1799 skb = copy_skb;
1800 } else if (unlikely(emac_alloc_rx_skb_napi(dev, slot)))
1801 goto oom;
1802
1803 skb_put(skb, len);
1804 push_packet:
1805 skb->protocol = eth_type_trans(skb, dev->ndev);
1806 emac_rx_csum(dev, skb, ctrl);
1807
1808 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1809 ++dev->estats.rx_dropped_stack;
1810 next:
1811 ++dev->stats.rx_packets;
1812 skip:
1813 dev->stats.rx_bytes += len;
1814 slot = (slot + 1) % NUM_RX_BUFF;
1815 --budget;
1816 ++received;
1817 continue;
1818 sg:
1819 if (ctrl & MAL_RX_CTRL_FIRST) {
1820 BUG_ON(dev->rx_sg_skb);
1821 if (unlikely(emac_alloc_rx_skb_napi(dev, slot))) {
1822 DBG(dev, "rx OOM %d" NL, slot);
1823 ++dev->estats.rx_dropped_oom;
1824 emac_recycle_rx_skb(dev, slot, 0);
1825 } else {
1826 dev->rx_sg_skb = skb;
1827 skb_put(skb, len);
1828 }
1829 } else if (!emac_rx_sg_append(dev, slot) &&
1830 (ctrl & MAL_RX_CTRL_LAST)) {
1831
1832 skb = dev->rx_sg_skb;
1833 dev->rx_sg_skb = NULL;
1834
1835 ctrl &= EMAC_BAD_RX_MASK;
1836 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1837 emac_parse_rx_error(dev, ctrl);
1838 ++dev->estats.rx_dropped_error;
1839 dev_kfree_skb(skb);
1840 len = 0;
1841 } else
1842 goto push_packet;
1843 }
1844 goto skip;
1845 oom:
1846 DBG(dev, "rx OOM %d" NL, slot);
1847
1848 ++dev->estats.rx_dropped_oom;
1849 emac_recycle_rx_skb(dev, slot, 0);
1850 goto next;
1851 }
1852
1853 if (received) {
1854 DBG2(dev, "rx %d BDs" NL, received);
1855 dev->rx_slot = slot;
1856 }
1857
1858 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1859 mb();
1860 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1861 DBG2(dev, "rx restart" NL);
1862 received = 0;
1863 goto again;
1864 }
1865
1866 if (dev->rx_sg_skb) {
1867 DBG2(dev, "dropping partial rx packet" NL);
1868 ++dev->estats.rx_dropped_error;
1869 dev_kfree_skb(dev->rx_sg_skb);
1870 dev->rx_sg_skb = NULL;
1871 }
1872
1873 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1874 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1875 emac_rx_enable(dev);
1876 dev->rx_slot = 0;
1877 }
1878 return received;
1879}
1880
1881
1882static int emac_peek_rx(void *param)
1883{
1884 struct emac_instance *dev = param;
1885
1886 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1887}
1888
1889
1890static int emac_peek_rx_sg(void *param)
1891{
1892 struct emac_instance *dev = param;
1893
1894 int slot = dev->rx_slot;
1895 while (1) {
1896 u16 ctrl = dev->rx_desc[slot].ctrl;
1897 if (ctrl & MAL_RX_CTRL_EMPTY)
1898 return 0;
1899 else if (ctrl & MAL_RX_CTRL_LAST)
1900 return 1;
1901
1902 slot = (slot + 1) % NUM_RX_BUFF;
1903
1904
1905 if (unlikely(slot == dev->rx_slot))
1906 return 0;
1907 }
1908}
1909
1910
1911static void emac_rxde(void *param)
1912{
1913 struct emac_instance *dev = param;
1914
1915 ++dev->estats.rx_stopped;
1916 emac_rx_disable_async(dev);
1917}
1918
1919
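/* Hard interrupt handler: only error conditions are enabled in the ISER
 * mask, so just acknowledge the ISR and account the individual error
 * counters.
 */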
1920static irqreturn_t emac_irq(int irq, void *dev_instance)
1921{
1922 struct emac_instance *dev = dev_instance;
1923 struct emac_regs __iomem *p = dev->emacp;
1924 struct emac_error_stats *st = &dev->estats;
1925 u32 isr;
1926
1927 spin_lock(&dev->lock);
1928
1929 isr = in_be32(&p->isr);
1930 out_be32(&p->isr, isr);
1931
1932 DBG(dev, "isr = %08x" NL, isr);
1933
1934 if (isr & EMAC4_ISR_TXPE)
1935 ++st->tx_parity;
1936 if (isr & EMAC4_ISR_RXPE)
1937 ++st->rx_parity;
1938 if (isr & EMAC4_ISR_TXUE)
1939 ++st->tx_underrun;
1940 if (isr & EMAC4_ISR_RXOE)
1941 ++st->rx_fifo_overrun;
1942 if (isr & EMAC_ISR_OVR)
1943 ++st->rx_overrun;
1944 if (isr & EMAC_ISR_BP)
1945 ++st->rx_bad_packet;
1946 if (isr & EMAC_ISR_RP)
1947 ++st->rx_runt_packet;
1948 if (isr & EMAC_ISR_SE)
1949 ++st->rx_short_event;
1950 if (isr & EMAC_ISR_ALE)
1951 ++st->rx_alignment_error;
1952 if (isr & EMAC_ISR_BFCS)
1953 ++st->rx_bad_fcs;
1954 if (isr & EMAC_ISR_PTLE)
1955 ++st->rx_packet_too_long;
1956 if (isr & EMAC_ISR_ORE)
1957 ++st->rx_out_of_range;
1958 if (isr & EMAC_ISR_IRE)
1959 ++st->rx_in_range;
1960 if (isr & EMAC_ISR_SQE)
1961 ++st->tx_sqe;
1962 if (isr & EMAC_ISR_TE)
1963 ++st->tx_errors;
1964
1965 spin_unlock(&dev->lock);
1966
1967 return IRQ_HANDLED;
1968}
1969
1970static struct net_device_stats *emac_stats(struct net_device *ndev)
1971{
1972 struct emac_instance *dev = netdev_priv(ndev);
1973 struct emac_stats *st = &dev->stats;
1974 struct emac_error_stats *est = &dev->estats;
1975 struct net_device_stats *nst = &ndev->stats;
1976 unsigned long flags;
1977
1978 DBG2(dev, "stats" NL);
1979
1980
1981 spin_lock_irqsave(&dev->lock, flags);
1982 nst->rx_packets = (unsigned long)st->rx_packets;
1983 nst->rx_bytes = (unsigned long)st->rx_bytes;
1984 nst->tx_packets = (unsigned long)st->tx_packets;
1985 nst->tx_bytes = (unsigned long)st->tx_bytes;
1986 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1987 est->rx_dropped_error +
1988 est->rx_dropped_resize +
1989 est->rx_dropped_mtu);
1990 nst->tx_dropped = (unsigned long)est->tx_dropped;
1991
1992 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1993 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1994 est->rx_fifo_overrun +
1995 est->rx_overrun);
1996 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1997 est->rx_alignment_error);
1998 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1999 est->rx_bad_fcs);
2000 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
2001 est->rx_bd_short_event +
2002 est->rx_bd_packet_too_long +
2003 est->rx_bd_out_of_range +
2004 est->rx_bd_in_range +
2005 est->rx_runt_packet +
2006 est->rx_short_event +
2007 est->rx_packet_too_long +
2008 est->rx_out_of_range +
2009 est->rx_in_range);
2010
2011 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
2012 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
2013 est->tx_underrun);
2014 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
2015 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
2016 est->tx_bd_excessive_collisions +
2017 est->tx_bd_late_collision +
2018 est->tx_bd_multple_collisions);
2019 spin_unlock_irqrestore(&dev->lock, flags);
2020 return nst;
2021}
2022
2023static struct mal_commac_ops emac_commac_ops = {
2024 .poll_tx = &emac_poll_tx,
2025 .poll_rx = &emac_poll_rx,
2026 .peek_rx = &emac_peek_rx,
2027 .rxde = &emac_rxde,
2028};
2029
2030static struct mal_commac_ops emac_commac_sg_ops = {
2031 .poll_tx = &emac_poll_tx,
2032 .poll_rx = &emac_poll_rx,
2033 .peek_rx = &emac_peek_rx_sg,
2034 .rxde = &emac_rxde,
2035};
2036
2037
2038static int emac_ethtool_get_link_ksettings(struct net_device *ndev,
2039 struct ethtool_link_ksettings *cmd)
2040{
2041 struct emac_instance *dev = netdev_priv(ndev);
2042 u32 supported, advertising;
2043
2044 supported = dev->phy.features;
2045 cmd->base.port = PORT_MII;
2046 cmd->base.phy_address = dev->phy.address;
2047
2048 mutex_lock(&dev->link_lock);
2049 advertising = dev->phy.advertising;
2050 cmd->base.autoneg = dev->phy.autoneg;
2051 cmd->base.speed = dev->phy.speed;
2052 cmd->base.duplex = dev->phy.duplex;
2053 mutex_unlock(&dev->link_lock);
2054
2055 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2056 supported);
2057 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2058 advertising);
2059
2060 return 0;
2061}
2062
2063static int
2064emac_ethtool_set_link_ksettings(struct net_device *ndev,
2065 const struct ethtool_link_ksettings *cmd)
2066{
2067 struct emac_instance *dev = netdev_priv(ndev);
2068 u32 f = dev->phy.features;
2069 u32 advertising;
2070
2071 ethtool_convert_link_mode_to_legacy_u32(&advertising,
2072 cmd->link_modes.advertising);
2073
2074 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
2075 cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising);
2076
2077
2078 if (dev->phy.address < 0)
2079 return -EOPNOTSUPP;
2080 if (cmd->base.autoneg != AUTONEG_ENABLE &&
2081 cmd->base.autoneg != AUTONEG_DISABLE)
2082 return -EINVAL;
2083 if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0)
2084 return -EINVAL;
2085 if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL)
2086 return -EINVAL;
2087
2088 if (cmd->base.autoneg == AUTONEG_DISABLE) {
2089 switch (cmd->base.speed) {
2090 case SPEED_10:
2091 if (cmd->base.duplex == DUPLEX_HALF &&
2092 !(f & SUPPORTED_10baseT_Half))
2093 return -EINVAL;
2094 if (cmd->base.duplex == DUPLEX_FULL &&
2095 !(f & SUPPORTED_10baseT_Full))
2096 return -EINVAL;
2097 break;
2098 case SPEED_100:
2099 if (cmd->base.duplex == DUPLEX_HALF &&
2100 !(f & SUPPORTED_100baseT_Half))
2101 return -EINVAL;
2102 if (cmd->base.duplex == DUPLEX_FULL &&
2103 !(f & SUPPORTED_100baseT_Full))
2104 return -EINVAL;
2105 break;
2106 case SPEED_1000:
2107 if (cmd->base.duplex == DUPLEX_HALF &&
2108 !(f & SUPPORTED_1000baseT_Half))
2109 return -EINVAL;
2110 if (cmd->base.duplex == DUPLEX_FULL &&
2111 !(f & SUPPORTED_1000baseT_Full))
2112 return -EINVAL;
2113 break;
2114 default:
2115 return -EINVAL;
2116 }
2117
2118 mutex_lock(&dev->link_lock);
2119 dev->phy.def->ops->setup_forced(&dev->phy, cmd->base.speed,
2120 cmd->base.duplex);
2121 mutex_unlock(&dev->link_lock);
2122
2123 } else {
2124 if (!(f & SUPPORTED_Autoneg))
2125 return -EINVAL;
2126
2127 mutex_lock(&dev->link_lock);
2128 dev->phy.def->ops->setup_aneg(&dev->phy,
2129 (advertising & f) |
2130 (dev->phy.advertising &
2131 (ADVERTISED_Pause |
2132 ADVERTISED_Asym_Pause)));
2133 mutex_unlock(&dev->link_lock);
2134 }
2135 emac_force_link_update(dev);
2136
2137 return 0;
2138}
2139
2140static void emac_ethtool_get_ringparam(struct net_device *ndev,
2141 struct ethtool_ringparam *rp)
2142{
2143 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2144 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2145}
2146
2147static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2148 struct ethtool_pauseparam *pp)
2149{
2150 struct emac_instance *dev = netdev_priv(ndev);
2151
2152 mutex_lock(&dev->link_lock);
2153 if ((dev->phy.features & SUPPORTED_Autoneg) &&
2154 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2155 pp->autoneg = 1;
2156
2157 if (dev->phy.duplex == DUPLEX_FULL) {
2158 if (dev->phy.pause)
2159 pp->rx_pause = pp->tx_pause = 1;
2160 else if (dev->phy.asym_pause)
2161 pp->tx_pause = 1;
2162 }
2163 mutex_unlock(&dev->link_lock);
2164}
2165
2166static int emac_get_regs_len(struct emac_instance *dev)
2167{
2168 return sizeof(struct emac_ethtool_regs_subhdr) +
2169 sizeof(struct emac_regs);
2170}
2171
2172static int emac_ethtool_get_regs_len(struct net_device *ndev)
2173{
2174 struct emac_instance *dev = netdev_priv(ndev);
2175 int size;
2176
2177 size = sizeof(struct emac_ethtool_regs_hdr) +
2178 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2179 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2180 size += zmii_get_regs_len(dev->zmii_dev);
2181 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2182 size += rgmii_get_regs_len(dev->rgmii_dev);
2183 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2184 size += tah_get_regs_len(dev->tah_dev);
2185
2186 return size;
2187}
2188
2189static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2190{
2191 struct emac_ethtool_regs_subhdr *hdr = buf;
2192
2193 hdr->index = dev->cell_index;
2194 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2195 hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
2196 } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2197 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2198 } else {
2199 hdr->version = EMAC_ETHTOOL_REGS_VER;
2200 }
2201 memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
2202 return (void *)(hdr + 1) + sizeof(struct emac_regs);
2203}
2204
2205static void emac_ethtool_get_regs(struct net_device *ndev,
2206 struct ethtool_regs *regs, void *buf)
2207{
2208 struct emac_instance *dev = netdev_priv(ndev);
2209 struct emac_ethtool_regs_hdr *hdr = buf;
2210
2211 hdr->components = 0;
2212 buf = hdr + 1;
2213
2214 buf = mal_dump_regs(dev->mal, buf);
2215 buf = emac_dump_regs(dev, buf);
2216 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2217 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2218 buf = zmii_dump_regs(dev->zmii_dev, buf);
2219 }
2220 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2221 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2222 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2223 }
2224 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2225 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2226 buf = tah_dump_regs(dev->tah_dev, buf);
2227 }
2228}
2229
2230static int emac_ethtool_nway_reset(struct net_device *ndev)
2231{
2232 struct emac_instance *dev = netdev_priv(ndev);
2233 int res = 0;
2234
2235 DBG(dev, "nway_reset" NL);
2236
2237 if (dev->phy.address < 0)
2238 return -EOPNOTSUPP;
2239
2240 mutex_lock(&dev->link_lock);
2241 if (!dev->phy.autoneg) {
2242 res = -EINVAL;
2243 goto out;
2244 }
2245
2246 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2247 out:
2248 mutex_unlock(&dev->link_lock);
2249 emac_force_link_update(dev);
2250 return res;
2251}
2252
2253static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2254{
2255 if (stringset == ETH_SS_STATS)
2256 return EMAC_ETHTOOL_STATS_COUNT;
2257 else
2258 return -EINVAL;
2259}
2260
static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 *buf)
2263{
2264 if (stringset == ETH_SS_STATS)
2265 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2266}
2267
2268static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2269 struct ethtool_stats *estats,
					    u64 *tmp_stats)
2271{
2272 struct emac_instance *dev = netdev_priv(ndev);
2273
2274 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2275 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2276 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2277}
2278
2279static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2280 struct ethtool_drvinfo *info)
2281{
2282 struct emac_instance *dev = netdev_priv(ndev);
2283
2284 strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
2285 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2286 snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %pOF",
2287 dev->cell_index, dev->ofdev->dev.of_node);
2288}
2289
2290static const struct ethtool_ops emac_ethtool_ops = {
2291 .get_drvinfo = emac_ethtool_get_drvinfo,
2292
2293 .get_regs_len = emac_ethtool_get_regs_len,
2294 .get_regs = emac_ethtool_get_regs,
2295
2296 .nway_reset = emac_ethtool_nway_reset,
2297
2298 .get_ringparam = emac_ethtool_get_ringparam,
2299 .get_pauseparam = emac_ethtool_get_pauseparam,
2300
2301 .get_strings = emac_ethtool_get_strings,
2302 .get_sset_count = emac_ethtool_get_sset_count,
2303 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2304
2305 .get_link = ethtool_op_get_link,
2306 .get_link_ksettings = emac_ethtool_get_link_ksettings,
2307 .set_link_ksettings = emac_ethtool_set_link_ksettings,
2308};
2309
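/* Legacy MII ioctls (e.g. as issued by mii-tool), serviced directly
 * through the EMAC MDIO accessors.
 */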
2310static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2311{
2312 struct emac_instance *dev = netdev_priv(ndev);
2313 struct mii_ioctl_data *data = if_mii(rq);
2314
2315 DBG(dev, "ioctl %08x" NL, cmd);
2316
2317 if (dev->phy.address < 0)
2318 return -EOPNOTSUPP;
2319
2320 switch (cmd) {
2321 case SIOCGMIIPHY:
2322 data->phy_id = dev->phy.address;
2323 fallthrough;
2324 case SIOCGMIIREG:
2325 data->val_out = emac_mdio_read(ndev, dev->phy.address,
2326 data->reg_num);
2327 return 0;
2328
2329 case SIOCSMIIREG:
2330 emac_mdio_write(ndev, dev->phy.address, data->reg_num,
2331 data->val_in);
2332 return 0;
2333 default:
2334 return -EOPNOTSUPP;
2335 }
2336}
2337
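/* Probe-time dependencies: the MAL, the optional ZMII/RGMII/TAH bridges,
 * the MDIO owner and the previously listed EMAC must all be bound before
 * this instance can finish probing.
 */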
2338struct emac_depentry {
2339 u32 phandle;
2340 struct device_node *node;
2341 struct platform_device *ofdev;
2342 void *drvdata;
2343};
2344
2345#define EMAC_DEP_MAL_IDX 0
2346#define EMAC_DEP_ZMII_IDX 1
2347#define EMAC_DEP_RGMII_IDX 2
2348#define EMAC_DEP_TAH_IDX 3
2349#define EMAC_DEP_MDIO_IDX 4
2350#define EMAC_DEP_PREV_IDX 5
2351#define EMAC_DEP_COUNT 6
2352
2353static int emac_check_deps(struct emac_instance *dev,
2354 struct emac_depentry *deps)
2355{
2356 int i, there = 0;
2357 struct device_node *np;
2358
2359 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2360
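		/* A zero phandle means this dependency does not apply */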
2361 if (deps[i].phandle == 0) {
2362 there++;
2363 continue;
2364 }
2365
2366 if (i == EMAC_DEP_PREV_IDX) {
2367 np = *(dev->blist - 1);
2368 if (np == NULL) {
2369 deps[i].phandle = 0;
2370 there++;
2371 continue;
2372 }
2373 if (deps[i].node == NULL)
2374 deps[i].node = of_node_get(np);
2375 }
2376 if (deps[i].node == NULL)
2377 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2378 if (deps[i].node == NULL)
2379 continue;
2380 if (deps[i].ofdev == NULL)
2381 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2382 if (deps[i].ofdev == NULL)
2383 continue;
2384 if (deps[i].drvdata == NULL)
2385 deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
2386 if (deps[i].drvdata != NULL)
2387 there++;
2388 }
2389 return there == EMAC_DEP_COUNT;
2390}
2391
2392static void emac_put_deps(struct emac_instance *dev)
2393{
2394 platform_device_put(dev->mal_dev);
2395 platform_device_put(dev->zmii_dev);
2396 platform_device_put(dev->rgmii_dev);
2397 platform_device_put(dev->mdio_dev);
2398 platform_device_put(dev->tah_dev);
2399}
2400
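/* Platform bus notifier used to re-evaluate pending dependencies each
 * time a driver binds to a device.
 */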
2401static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
2402 void *data)
2403{
2404
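	/* Only driver-bind events are of interest here */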
2405 if (action == BUS_NOTIFY_BOUND_DRIVER)
2406 wake_up_all(&emac_probe_wait);
2407 return 0;
2408}
2409
2410static struct notifier_block emac_of_bus_notifier = {
2411 .notifier_call = emac_of_bus_notify
2412};
2413
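/* Wait (bounded by EMAC_PROBE_DEP_TIMEOUT) for every dependency to show
 * up, then keep references to their platform devices; on timeout the
 * references are dropped and -ENODEV is returned.
 */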
2414static int emac_wait_deps(struct emac_instance *dev)
2415{
2416 struct emac_depentry deps[EMAC_DEP_COUNT];
2417 int i, err;
2418
2419 memset(&deps, 0, sizeof(deps));
2420
2421 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2422 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2423 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2424 if (dev->tah_ph)
2425 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2426 if (dev->mdio_ph)
2427 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2428 if (dev->blist && dev->blist > emac_boot_list)
2429 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2430 bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
2431 wait_event_timeout(emac_probe_wait,
2432 emac_check_deps(dev, deps),
2433 EMAC_PROBE_DEP_TIMEOUT);
2434 bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
2435 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2436 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2437 of_node_put(deps[i].node);
2438 if (err)
2439 platform_device_put(deps[i].ofdev);
2440 }
2441 if (err == 0) {
2442 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2443 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2444 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2445 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2446 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2447 }
2448 platform_device_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2449 return err;
2450}
2451
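/* Read a u32 property from the device tree; the error message is only
 * printed when the property is mandatory.
 */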
2452static int emac_read_uint_prop(struct device_node *np, const char *name,
2453 u32 *val, int fatal)
2454{
2455 int len;
2456 const u32 *prop = of_get_property(np, name, &len);
2457 if (prop == NULL || len < sizeof(u32)) {
2458 if (fatal)
2459 printk(KERN_ERR "%pOF: missing %s property\n",
2460 np, name);
2461 return -ENODEV;
2462 }
2463 *val = *prop;
2464 return 0;
2465}
2466
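/* phylib link-change callback: mirror the phy_device state into the
 * legacy mii_phy structure used by the rest of the driver.
 */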
2467static void emac_adjust_link(struct net_device *ndev)
2468{
2469 struct emac_instance *dev = netdev_priv(ndev);
2470 struct phy_device *phy = dev->phy_dev;
2471
2472 dev->phy.autoneg = phy->autoneg;
2473 dev->phy.speed = phy->speed;
2474 dev->phy.duplex = phy->duplex;
2475 dev->phy.pause = phy->pause;
2476 dev->phy.asym_pause = phy->asym_pause;
2477 ethtool_convert_link_mode_to_legacy_u32(&dev->phy.advertising,
2478 phy->advertising);
2479}
2480
2481static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
2482{
2483 int ret = emac_mdio_read(bus->priv, addr, regnum);
2489
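	/* Report failed reads as 0xffff so callers see the bus value of an
	 * absent or powered-down PHY rather than an error code.
	 */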
2490 return ret < 0 ? 0xffff : ret;
2491}
2492
2493static int emac_mii_bus_write(struct mii_bus *bus, int addr,
2494 int regnum, u16 val)
2495{
2496 emac_mdio_write(bus->priv, addr, regnum, val);
2497 return 0;
2498}
2499
2500static int emac_mii_bus_reset(struct mii_bus *bus)
2501{
2502 struct emac_instance *dev = netdev_priv(bus->priv);
2503
2504 return emac_reset(dev);
2505}
2506
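/* Push the legacy mii_phy settings into the phylib phy_device and
 * (re)start autonegotiation; shared by the setup_aneg and setup_forced
 * callbacks below.
 */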
2507static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
2508 struct phy_device *phy_dev)
2509{
2510 phy_dev->autoneg = phy->autoneg;
2511 phy_dev->speed = phy->speed;
2512 phy_dev->duplex = phy->duplex;
2513 ethtool_convert_legacy_u32_to_link_mode(phy_dev->advertising,
2514 phy->advertising);
2515 return phy_start_aneg(phy_dev);
2516}
2517
2518static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
2519{
2520 struct net_device *ndev = phy->dev;
2521 struct emac_instance *dev = netdev_priv(ndev);
2522
2523 phy->autoneg = AUTONEG_ENABLE;
2524 phy->advertising = advertise;
2525 return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
2526}
2527
2528static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
2529{
2530 struct net_device *ndev = phy->dev;
2531 struct emac_instance *dev = netdev_priv(ndev);
2532
2533 phy->autoneg = AUTONEG_DISABLE;
2534 phy->speed = speed;
2535 phy->duplex = fd;
2536 return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
2537}
2538
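/* Poll the link state through phylib; if the status read fails, fall
 * back to the current carrier state.
 */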
2539static int emac_mdio_poll_link(struct mii_phy *phy)
2540{
2541 struct net_device *ndev = phy->dev;
2542 struct emac_instance *dev = netdev_priv(ndev);
2543 int res;
2544
2545 res = phy_read_status(dev->phy_dev);
2546 if (res) {
2547 dev_err(&dev->ofdev->dev, "link update failed (%d).", res);
2548 return ethtool_op_get_link(ndev);
2549 }
2550
2551 return dev->phy_dev->link;
2552}
2553
2554static int emac_mdio_read_link(struct mii_phy *phy)
2555{
2556 struct net_device *ndev = phy->dev;
2557 struct emac_instance *dev = netdev_priv(ndev);
2558 struct phy_device *phy_dev = dev->phy_dev;
2559 int res;
2560
2561 res = phy_read_status(phy_dev);
2562 if (res)
2563 return res;
2564
2565 phy->speed = phy_dev->speed;
2566 phy->duplex = phy_dev->duplex;
2567 phy->pause = phy_dev->pause;
2568 phy->asym_pause = phy_dev->asym_pause;
2569 return 0;
2570}
2571
2572static int emac_mdio_init_phy(struct mii_phy *phy)
2573{
2574 struct net_device *ndev = phy->dev;
2575 struct emac_instance *dev = netdev_priv(ndev);
2576
2577 phy_start(dev->phy_dev);
2578 return phy_init_hw(dev->phy_dev);
2579}
2580
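/* mii_phy callbacks implemented on top of phylib, used when the PHY is
 * described in the device tree.
 */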
2581static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
2582 .init = emac_mdio_init_phy,
2583 .setup_aneg = emac_mdio_setup_aneg,
2584 .setup_forced = emac_mdio_setup_forced,
2585 .poll_link = emac_mdio_poll_link,
2586 .read_link = emac_mdio_read_link,
2587};
2588
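/* Register an MDIO bus backed by the EMAC's own MDIO accessors and
 * populated from the "mdio" child node.
 */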
2589static int emac_dt_mdio_probe(struct emac_instance *dev)
2590{
2591 struct device_node *mii_np;
2592 int res;
2593
2594 mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
2595 if (!mii_np) {
2596 dev_err(&dev->ofdev->dev, "no mdio definition found.");
2597 return -ENODEV;
2598 }
2599
2600 if (!of_device_is_available(mii_np)) {
2601 res = -ENODEV;
2602 goto put_node;
2603 }
2604
2605 dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev);
2606 if (!dev->mii_bus) {
2607 res = -ENOMEM;
2608 goto put_node;
2609 }
2610
2611 dev->mii_bus->priv = dev->ndev;
2612 dev->mii_bus->parent = dev->ndev->dev.parent;
2613 dev->mii_bus->name = "emac_mdio";
2614 dev->mii_bus->read = &emac_mii_bus_read;
2615 dev->mii_bus->write = &emac_mii_bus_write;
2616 dev->mii_bus->reset = &emac_mii_bus_reset;
2617 snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
2618 res = of_mdiobus_register(dev->mii_bus, mii_np);
2619 if (res) {
2620 dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)",
2621 dev->mii_bus->name, res);
2622 }
2623
2624 put_node:
2625 of_node_put(mii_np);
2626 return res;
2627}
2628
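/* Connect to the PHY referenced by phy-handle and synthesize a
 * mii_phy_def that routes through the phylib-backed ops above.
 */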
2629static int emac_dt_phy_connect(struct emac_instance *dev,
2630 struct device_node *phy_handle)
2631{
2632 dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
2633 GFP_KERNEL);
2634 if (!dev->phy.def)
2635 return -ENOMEM;
2636
2637 dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link,
2638 0, dev->phy_mode);
2639 if (!dev->phy_dev) {
2640 dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n");
2641 return -ENODEV;
2642 }
2643
2644 dev->phy.def->phy_id = dev->phy_dev->drv->phy_id;
2645 dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
2646 dev->phy.def->name = dev->phy_dev->drv->name;
2647 dev->phy.def->ops = &emac_dt_mdio_phy_ops;
2648 ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
2649 dev->phy_dev->supported);
2650 dev->phy.address = dev->phy_dev->mdio.addr;
2651 dev->phy.mode = dev->phy_dev->interface;
2652 return 0;
2653}
2654
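/* Returns 0 if a phy-handle was found and the PHY connected, 1 if the
 * node has no phy-handle (the caller falls back to the legacy probe),
 * or a negative errno on failure.
 */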
2655static int emac_dt_phy_probe(struct emac_instance *dev)
2656{
2657 struct device_node *np = dev->ofdev->dev.of_node;
2658 struct device_node *phy_handle;
2659 int res = 1;
2660
2661 phy_handle = of_parse_phandle(np, "phy-handle", 0);
2662
2663 if (phy_handle) {
2664 res = emac_dt_mdio_probe(dev);
2665 if (!res) {
2666 res = emac_dt_phy_connect(dev, phy_handle);
2667 if (res)
2668 mdiobus_unregister(dev->mii_bus);
2669 }
2670 }
2671
2672 of_node_put(phy_handle);
2673 return res;
2674}
2675
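/* PHY setup: handle the PHY-less and fixed-link cases first, then try a
 * device-tree described PHY (on RGMII-capable parts), and finally fall
 * back to scanning the MDIO bus using the legacy phy-map.  Finish by
 * configuring autonegotiation or a forced link.
 */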
2676static int emac_init_phy(struct emac_instance *dev)
2677{
2678 struct device_node *np = dev->ofdev->dev.of_node;
2679 struct net_device *ndev = dev->ndev;
2680 u32 phy_map, adv;
2681 int i;
2682
2683 dev->phy.dev = ndev;
2684 dev->phy.mode = dev->phy_mode;
2685
2686
2687 if ((dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) ||
2688 of_phy_is_fixed_link(np)) {
2689 emac_reset(dev);
2690
2691
2692 dev->phy.address = -1;
2693 dev->phy.features = SUPPORTED_MII;
2694 if (emac_phy_supports_gige(dev->phy_mode))
2695 dev->phy.features |= SUPPORTED_1000baseT_Full;
2696 else
2697 dev->phy.features |= SUPPORTED_100baseT_Full;
2698 dev->phy.pause = 1;
2699
2700 if (of_phy_is_fixed_link(np)) {
2701 int res = emac_dt_mdio_probe(dev);
2702
2703 if (res)
2704 return res;
2705
2706 res = of_phy_register_fixed_link(np);
2707 dev->phy_dev = of_phy_find_device(np);
2708 if (res || !dev->phy_dev) {
2709 mdiobus_unregister(dev->mii_bus);
2710 return res ? res : -EINVAL;
2711 }
2712 emac_adjust_link(dev->ndev);
2713 put_device(&dev->phy_dev->mdio.dev);
2714 }
2715 return 0;
2716 }
2717
2718 mutex_lock(&emac_phy_map_lock);
2719 phy_map = dev->phy_map | busy_phy_map;
2720
2721 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2722
2723 dev->phy.mdio_read = emac_mdio_read;
2724 dev->phy.mdio_write = emac_mdio_write;
2725
2726
2727#ifdef CONFIG_PPC_DCR_NATIVE
2728 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2729 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2730#endif
2731
2732 emac_rx_clk_tx(dev);
2733
2734
2735#ifdef CONFIG_PPC_DCR_NATIVE
2736 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2737 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2738#endif
2739
2740
2741
2742 if (emac_phy_gpcs(dev->phy.mode)) {
2750
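		/* Internal GPCS PHY: use the cell index as the address
		 * unless a gpcs-address property was provided.
		 */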
2751 dev->phy.gpcs_address = dev->gpcs_address;
2752 if (dev->phy.gpcs_address == 0xffffffff)
2753 dev->phy.address = dev->cell_index;
2754 }
2755
2756 emac_configure(dev);
2757
2758 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2759 int res = emac_dt_phy_probe(dev);
2760
2761 switch (res) {
2762 case 1:
2766
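			/* No phy-handle property: fall through to the
			 * legacy PHY map scan below.
			 */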
2767 break;
2768
2769 case 0:
2770 mutex_unlock(&emac_phy_map_lock);
2771 goto init_phy;
2772
2773 default:
2774 mutex_unlock(&emac_phy_map_lock);
2775 dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
2776 res);
2777 return res;
2778 }
2779 }
2780
2781 if (dev->phy_address != 0xffffffff)
2782 phy_map = ~(1 << dev->phy_address);
2783
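	/* Scan MDIO addresses 0-31, skipping the ones already claimed in
	 * busy_phy_map (or everything but the fixed phy-address, if one was
	 * given above).
	 */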
2784 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2785 if (!(phy_map & 1)) {
2786 int r;
2787 busy_phy_map |= 1 << i;
2788
2789
2790 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2791 if (r == 0xffff || r < 0)
2792 continue;
2793 if (!emac_mii_phy_probe(&dev->phy, i))
2794 break;
2795 }
2796
2797
2798#ifdef CONFIG_PPC_DCR_NATIVE
2799 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2800 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2801#endif
2802 mutex_unlock(&emac_phy_map_lock);
2803 if (i == 0x20) {
2804 printk(KERN_WARNING "%pOF: can't find PHY!\n", np);
2805 return -ENXIO;
2806 }
2807
2808 init_phy:
2809
2810 if (dev->phy.def->ops->init)
2811 dev->phy.def->ops->init(&dev->phy);
2812
2813
2814 dev->phy.def->features &= ~dev->phy_feat_exc;
2815 dev->phy.features &= ~dev->phy_feat_exc;
2816
2817
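	/* With autoneg, advertise everything the PHY supports plus pause
	 * where flow control is allowed; otherwise force the fastest mode
	 * the PHY reports.
	 */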
2818 if (dev->phy.features & SUPPORTED_Autoneg) {
2819 adv = dev->phy.features;
2820 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2821 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2822
2823 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2824 } else {
2825 u32 f = dev->phy.def->features;
2826 int speed = SPEED_10, fd = DUPLEX_HALF;
2827
2828
2829 if (f & SUPPORTED_1000baseT_Full) {
2830 speed = SPEED_1000;
2831 fd = DUPLEX_FULL;
2832 } else if (f & SUPPORTED_1000baseT_Half)
2833 speed = SPEED_1000;
2834 else if (f & SUPPORTED_100baseT_Full) {
2835 speed = SPEED_100;
2836 fd = DUPLEX_FULL;
2837 } else if (f & SUPPORTED_100baseT_Half)
2838 speed = SPEED_100;
2839 else if (f & SUPPORTED_10baseT_Full)
2840 fd = DUPLEX_FULL;
2841
2842
2843 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2844 }
2845 return 0;
2846}
2847
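/* Read the per-instance configuration from the device tree.  Missing
 * mandatory properties (mal-device, mal-tx-channel, mal-rx-channel,
 * cell-index, the parent bus clock-frequency and local-mac-address)
 * abort the probe; everything else falls back to a built-in default.
 *
 * A rough, illustrative sketch of a node this parser accepts; the label,
 * unit address and values are made up, not taken from a real board, and
 * the MAC address is normally filled in by firmware:
 *
 *	EMAC0: ethernet@ef600c00 {
 *		device_type = "network";
 *		compatible = "ibm,emac4sync", "ibm,emac4";
 *		cell-index = <0>;
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		max-frame-size = <9000>;
 *		rx-fifo-size = <4096>;
 *		tx-fifo-size = <2048>;
 *		phy-mode = "rgmii";
 *		local-mac-address = [00 00 00 00 00 00];
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *	};
 */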
2848static int emac_init_config(struct emac_instance *dev)
2849{
2850 struct device_node *np = dev->ofdev->dev.of_node;
2851 const void *p;
2852 int err;
2853
2854
2855 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2856 return -ENXIO;
2857 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2858 return -ENXIO;
2859 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2860 return -ENXIO;
2861 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2862 return -ENXIO;
2863 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2864 dev->max_mtu = ETH_DATA_LEN;
2865 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2866 dev->rx_fifo_size = 2048;
2867 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2868 dev->tx_fifo_size = 2048;
2869 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2870 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2871 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2872 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2873 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2874 dev->phy_address = 0xffffffff;
2875 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2876 dev->phy_map = 0xffffffff;
2877 if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2878 dev->gpcs_address = 0xffffffff;
2879 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2880 return -ENXIO;
2881 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2882 dev->tah_ph = 0;
2883 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2884 dev->tah_port = 0;
2885 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2886 dev->mdio_ph = 0;
2887 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2888 dev->zmii_ph = 0;
2889 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2890 dev->zmii_port = 0xffffffff;
2891 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2892 dev->rgmii_ph = 0;
2893 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2894 dev->rgmii_port = 0xffffffff;
2895 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2896 dev->fifo_entry_size = 16;
2897 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2898 dev->mal_burst_size = 256;
2899
2900
2901 err = of_get_phy_mode(np, &dev->phy_mode);
2902 if (err)
2903 dev->phy_mode = PHY_INTERFACE_MODE_NA;
2904
2905
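	/* Derive feature flags and clock-fix errata from the compatible
	 * strings.
	 */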
2906 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2907 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2908 if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2909 of_device_is_compatible(np, "ibm,emac-460gt"))
2910 dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2911 if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2912 of_device_is_compatible(np, "ibm,emac-405exr"))
2913 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2914 if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
2915 dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
2916 EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
2917 EMAC_FTR_460EX_PHY_CLK_FIX);
2918 }
2919 } else if (of_device_is_compatible(np, "ibm,emac4")) {
2920 dev->features |= EMAC_FTR_EMAC4;
2921 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2922 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2923 } else {
2924 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2925 of_device_is_compatible(np, "ibm,emac-440gr"))
2926 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2927 if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2928#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
2929 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2930#else
2931 printk(KERN_ERR "%pOF: Flow control not disabled!\n",
2932 np);
2933 return -ENXIO;
2934#endif
2935 }
2936
2937 }
2938
2939
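	/* Optional STACR quirks may also be flagged directly in the device
	 * tree.
	 */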
2940 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2941 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2942 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2943 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2944
2945
2946 if (of_device_is_compatible(np, "ibm,emac-axon"))
2947 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2948 EMAC_FTR_STACR_OC_INVERT;
2949
2950
2951 if (dev->tah_ph != 0) {
2952#ifdef CONFIG_IBM_EMAC_TAH
2953 dev->features |= EMAC_FTR_HAS_TAH;
2954#else
		printk(KERN_ERR "%pOF: TAH support not enabled!\n", np);
2956 return -ENXIO;
2957#endif
2958 }
2959
2960 if (dev->zmii_ph != 0) {
2961#ifdef CONFIG_IBM_EMAC_ZMII
2962 dev->features |= EMAC_FTR_HAS_ZMII;
2963#else
		printk(KERN_ERR "%pOF: ZMII support not enabled!\n", np);
2965 return -ENXIO;
2966#endif
2967 }
2968
2969 if (dev->rgmii_ph != 0) {
2970#ifdef CONFIG_IBM_EMAC_RGMII
2971 dev->features |= EMAC_FTR_HAS_RGMII;
2972#else
		printk(KERN_ERR "%pOF: RGMII support not enabled!\n", np);
2974 return -ENXIO;
2975#endif
2976 }
2977
2978
2979 p = of_get_property(np, "local-mac-address", NULL);
2980 if (p == NULL) {
2981 printk(KERN_ERR "%pOF: Can't find local-mac-address property\n",
2982 np);
2983 return -ENXIO;
2984 }
2985 memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
2986
2987
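	/* The address hash geometry differs between EMAC4SYNC and the older
	 * cores.
	 */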
2988 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2989 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2990 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2991 } else {
2992 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2993 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2994 }
2995
2996
2997 if (WARN_ON(EMAC_XAHT_REGS(dev) > EMAC_XAHT_MAX_REGS))
2998 return -ENXIO;
2999
3000 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
3001 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
3002 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
3003 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
3004 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
3005
3006 return 0;
3007}
3008
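/* Two sets of netdev ops: the gigabit-capable variant adds scatter/gather
 * transmit and runtime MTU changes.
 */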
3009static const struct net_device_ops emac_netdev_ops = {
3010 .ndo_open = emac_open,
3011 .ndo_stop = emac_close,
3012 .ndo_get_stats = emac_stats,
3013 .ndo_set_rx_mode = emac_set_multicast_list,
3014 .ndo_do_ioctl = emac_ioctl,
3015 .ndo_tx_timeout = emac_tx_timeout,
3016 .ndo_validate_addr = eth_validate_addr,
3017 .ndo_set_mac_address = emac_set_mac_address,
3018 .ndo_start_xmit = emac_start_xmit,
3019};
3020
3021static const struct net_device_ops emac_gige_netdev_ops = {
3022 .ndo_open = emac_open,
3023 .ndo_stop = emac_close,
3024 .ndo_get_stats = emac_stats,
3025 .ndo_set_rx_mode = emac_set_multicast_list,
3026 .ndo_do_ioctl = emac_ioctl,
3027 .ndo_tx_timeout = emac_tx_timeout,
3028 .ndo_validate_addr = eth_validate_addr,
3029 .ndo_set_mac_address = emac_set_mac_address,
3030 .ndo_start_xmit = emac_start_xmit_sg,
3031 .ndo_change_mtu = emac_change_mtu,
3032};
3033
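/* Probe: allocate the net_device, parse the configuration, map registers
 * and interrupts, wait for dependencies, attach to the MAL channels and
 * the optional bridges, set up the PHY and finally register the
 * interface.
 */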
3034static int emac_probe(struct platform_device *ofdev)
3035{
3036 struct net_device *ndev;
3037 struct emac_instance *dev;
3038 struct device_node *np = ofdev->dev.of_node;
3039 struct device_node **blist = NULL;
3040 int err, i;
3045
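	/* Nodes marked "unused" or disabled in the device tree are skipped */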
3046 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
3047 return -ENODEV;
3048
3049
3050 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3051 if (emac_boot_list[i] == np)
3052 blist = &emac_boot_list[i];
3053
3054
3055 err = -ENOMEM;
3056 ndev = alloc_etherdev(sizeof(struct emac_instance));
3057 if (!ndev)
3058 goto err_gone;
3059
3060 dev = netdev_priv(ndev);
3061 dev->ndev = ndev;
3062 dev->ofdev = ofdev;
3063 dev->blist = blist;
3064 SET_NETDEV_DEV(ndev, &ofdev->dev);
3065
3066
3067 mutex_init(&dev->mdio_lock);
3068 mutex_init(&dev->link_lock);
3069 spin_lock_init(&dev->lock);
3070 INIT_WORK(&dev->reset_work, emac_reset_work);
3071
3072
3073 err = emac_init_config(dev);
3074 if (err)
3075 goto err_free;
3076
3077
3078 dev->emac_irq = irq_of_parse_and_map(np, 0);
3079 dev->wol_irq = irq_of_parse_and_map(np, 1);
3080 if (!dev->emac_irq) {
3081 printk(KERN_ERR "%pOF: Can't map main interrupt\n", np);
3082 err = -ENODEV;
3083 goto err_free;
3084 }
3085 ndev->irq = dev->emac_irq;
3086
3087
3088
3089 dev->emacp = of_iomap(np, 0);
3090 if (dev->emacp == NULL) {
3091 printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
3092 err = -ENOMEM;
3093 goto err_irq_unmap;
3094 }
3095
3096
3097 err = emac_wait_deps(dev);
3098 if (err) {
3099 printk(KERN_ERR
3100 "%pOF: Timeout waiting for dependent devices\n", np);
3101
3102 goto err_reg_unmap;
3103 }
3104 dev->mal = platform_get_drvdata(dev->mal_dev);
3105 if (dev->mdio_dev != NULL)
3106 dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
3107
3108
3109 dev->commac.ops = &emac_commac_ops;
3110 dev->commac.dev = dev;
3111 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
3112 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
3113 err = mal_register_commac(dev->mal, &dev->commac);
3114 if (err) {
3115 printk(KERN_ERR "%pOF: failed to register with mal %pOF!\n",
3116 np, dev->mal_dev->dev.of_node);
3117 goto err_rel_deps;
3118 }
3119 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
3120 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
3121
3122
3123 dev->tx_desc =
3124 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
3125 dev->rx_desc =
3126 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
3127
3128 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
3129 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
3130
3131
3132 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
3133 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
3134 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
3135 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
3136
3137
3138 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
3139 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
3140 goto err_unreg_commac;
3141
3142
3143 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
3144 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
3145 goto err_detach_zmii;
3146
3147
3148 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
3149 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
3150 goto err_detach_rgmii;
3151
3152
3153 dev->phy.speed = SPEED_100;
3154 dev->phy.duplex = DUPLEX_FULL;
3155 dev->phy.autoneg = AUTONEG_DISABLE;
3156 dev->phy.pause = dev->phy.asym_pause = 0;
3157 dev->stop_timeout = STOP_TIMEOUT_100;
3158 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
3159
3160
3161 if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
3162 dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
3163 SUPPORTED_100baseT_Half |
3164 SUPPORTED_10baseT_Half);
3165 }
3166
3167
3168 err = emac_init_phy(dev);
3169 if (err != 0)
3170 goto err_detach_tah;
3171
3172 if (dev->tah_dev) {
3173 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
3174 ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
3175 }
3176 ndev->watchdog_timeo = 5 * HZ;
3177 if (emac_phy_supports_gige(dev->phy_mode)) {
3178 ndev->netdev_ops = &emac_gige_netdev_ops;
3179 dev->commac.ops = &emac_commac_sg_ops;
3180 } else
3181 ndev->netdev_ops = &emac_netdev_ops;
3182 ndev->ethtool_ops = &emac_ethtool_ops;
3183
3184
3185 ndev->min_mtu = EMAC_MIN_MTU;
3186 ndev->max_mtu = dev->max_mtu;
3187
3188 netif_carrier_off(ndev);
3189
3190 err = register_netdev(ndev);
3191 if (err) {
3192 printk(KERN_ERR "%pOF: failed to register net device (%d)!\n",
3193 np, err);
3194 goto err_detach_tah;
3195 }
3199
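	/* Make all of the above visible before publishing drvdata and waking
	 * any EMACs that wait on this one as a dependency.
	 */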
3200 wmb();
3201 platform_set_drvdata(ofdev, dev);
3202
3203
3204 wake_up_all(&emac_probe_wait);
3205
3206
3207 printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
3208 ndev->name, dev->cell_index, np, ndev->dev_addr);
3209
3210 if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII)
3211 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
3212
	if (dev->phy.address >= 0)
		printk(KERN_INFO "%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);
3216
3217
3218 return 0;
3219
3220
3221
3222 err_detach_tah:
3223 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
3224 tah_detach(dev->tah_dev, dev->tah_port);
3225 err_detach_rgmii:
3226 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
3227 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
3228 err_detach_zmii:
3229 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
3230 zmii_detach(dev->zmii_dev, dev->zmii_port);
3231 err_unreg_commac:
3232 mal_unregister_commac(dev->mal, &dev->commac);
3233 err_rel_deps:
3234 emac_put_deps(dev);
3235 err_reg_unmap:
3236 iounmap(dev->emacp);
3237 err_irq_unmap:
3238 if (dev->wol_irq)
3239 irq_dispose_mapping(dev->wol_irq);
3240 if (dev->emac_irq)
3241 irq_dispose_mapping(dev->emac_irq);
3242 err_free:
3243 free_netdev(ndev);
3244 err_gone:
3248
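	/* Clear our boot-list slot so EMACs probed after this one stop
	 * waiting on it, and wake them up.
	 */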
3249 if (blist) {
3250 *blist = NULL;
3251 wake_up_all(&emac_probe_wait);
3252 }
3253 return err;
3254}
3255
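/* Tear everything down in the reverse order of emac_probe() */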
3256static int emac_remove(struct platform_device *ofdev)
3257{
3258 struct emac_instance *dev = platform_get_drvdata(ofdev);
3259
3260 DBG(dev, "remove" NL);
3261
3262 unregister_netdev(dev->ndev);
3263
3264 cancel_work_sync(&dev->reset_work);
3265
3266 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
3267 tah_detach(dev->tah_dev, dev->tah_port);
3268 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
3269 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
3270 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
3271 zmii_detach(dev->zmii_dev, dev->zmii_port);
3272
3273 if (dev->phy_dev)
3274 phy_disconnect(dev->phy_dev);
3275
3276 if (dev->mii_bus)
3277 mdiobus_unregister(dev->mii_bus);
3278
3279 busy_phy_map &= ~(1 << dev->phy.address);
3280 DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
3281
3282 mal_unregister_commac(dev->mal, &dev->commac);
3283 emac_put_deps(dev);
3284
3285 iounmap(dev->emacp);
3286
3287 if (dev->wol_irq)
3288 irq_dispose_mapping(dev->wol_irq);
3289 if (dev->emac_irq)
3290 irq_dispose_mapping(dev->emac_irq);
3291
3292 free_netdev(dev->ndev);
3293
3294 return 0;
3295}
3296
3297
static const struct of_device_id emac_match[] = {
3300 {
3301 .type = "network",
3302 .compatible = "ibm,emac",
3303 },
3304 {
3305 .type = "network",
3306 .compatible = "ibm,emac4",
3307 },
3308 {
3309 .type = "network",
3310 .compatible = "ibm,emac4sync",
3311 },
3312 {},
3313};
3314MODULE_DEVICE_TABLE(of, emac_match);
3315
3316static struct platform_driver emac_driver = {
3317 .driver = {
3318 .name = "emac",
3319 .of_match_table = emac_match,
3320 },
3321 .probe = emac_probe,
3322 .remove = emac_remove,
3323};
3324
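/* Build the list of EMAC nodes sorted by cell-index, so that an instance
 * can find the EMAC probed before it when resolving dependencies.
 */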
3325static void __init emac_make_bootlist(void)
3326{
3327 struct device_node *np = NULL;
3328 int j, max, i = 0;
3329 int cell_indices[EMAC_BOOT_LIST_SIZE];
3330
3331
	while ((np = of_find_all_nodes(np)) != NULL) {
3333 const u32 *idx;
3334
3335 if (of_match_node(emac_match, np) == NULL)
3336 continue;
3337 if (of_get_property(np, "unused", NULL))
3338 continue;
3339 idx = of_get_property(np, "cell-index", NULL);
3340 if (idx == NULL)
3341 continue;
3342 cell_indices[i] = *idx;
3343 emac_boot_list[i++] = of_node_get(np);
3344 if (i >= EMAC_BOOT_LIST_SIZE) {
3345 of_node_put(np);
3346 break;
3347 }
3348 }
3349 max = i;
3350
3351
3352 for (i = 0; max > 1 && (i < (max - 1)); i++)
3353 for (j = i; j < max; j++) {
3354 if (cell_indices[i] > cell_indices[j]) {
3355 swap(emac_boot_list[i], emac_boot_list[j]);
3356 swap(cell_indices[i], cell_indices[j]);
3357 }
3358 }
3359}
3360
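/* Module init: bring up the MAL and bridge (ZMII/RGMII/TAH) helpers
 * before registering the EMAC platform driver, unwinding in reverse
 * order on failure.
 */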
3361static int __init emac_init(void)
3362{
3363 int rc;
3364
3365 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3366
3367
3368 emac_make_bootlist();
3369
3370
3371 rc = mal_init();
3372 if (rc)
3373 goto err;
3374 rc = zmii_init();
3375 if (rc)
3376 goto err_mal;
3377 rc = rgmii_init();
3378 if (rc)
3379 goto err_zmii;
3380 rc = tah_init();
3381 if (rc)
3382 goto err_rgmii;
3383 rc = platform_driver_register(&emac_driver);
3384 if (rc)
3385 goto err_tah;
3386
3387 return 0;
3388
3389 err_tah:
3390 tah_exit();
3391 err_rgmii:
3392 rgmii_exit();
3393 err_zmii:
3394 zmii_exit();
3395 err_mal:
3396 mal_exit();
3397 err:
3398 return rc;
3399}
3400
3401static void __exit emac_exit(void)
3402{
3403 int i;
3404
3405 platform_driver_unregister(&emac_driver);
3406
3407 tah_exit();
3408 rgmii_exit();
3409 zmii_exit();
3410 mal_exit();
3411
3412
3413 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3414 of_node_put(emac_boot_list[i]);
3415}
3416
3417module_init(emac_init);
3418module_exit(emac_exit);
3419