/*
 * Davicom DM9000 Fast Ethernet driver for Linux.
 *
 * Authors: Sascha Hauer, Ben Dooks (see MODULE_AUTHOR below).
 */
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/ethtool.h>
#include <linux/dm9000.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/io.h>

#include "dm9000.h"

/* Board/driver configuration */

#define DM9000_PHY	0x40	/* PHY address 0x01 */

#define CARDNAME	"dm9000"

/* Transmit timeout, in milliseconds (default 5 seconds). */
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

/* Debug message level, 0 (quiet) to 6 (verbose). */
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "dm9000 debug level (0-6)");

/* The DM9000 chip variants supported by this driver. */
enum dm9000_type {
	TYPE_DM9000E,	/* original DM9000 */
	TYPE_DM9000A,
	TYPE_DM9000B
};

/* Per-device driver state ("board information"). */
struct board_info {

	void __iomem	*io_addr;	/* register I/O base address */
	void __iomem	*io_data;	/* data I/O address */
	u16		irq;		/* IRQ */

	u16		tx_pkt_cnt;
	u16		queue_pkt_len;
	u16		queue_start_addr;
	u16		queue_ip_summed;
	u16		dbug_cnt;
	u8		io_mode;	/* 0:word, 2:byte */
	u8		phy_addr;
	u8		imr_all;

	unsigned int	flags;
	unsigned int	in_timeout:1;
	unsigned int	in_suspend:1;
	unsigned int	wake_supported:1;

	enum dm9000_type type;

	void (*inblk)(void __iomem *port, void *data, int length);
	void (*outblk)(void __iomem *port, void *data, int length);
	void (*dumpblk)(void __iomem *port, int length);

	struct device	*dev;		/* parent device */

	struct resource	*addr_res;	/* resources found */
	struct resource	*data_res;
	struct resource	*addr_req;	/* resources requested */
	struct resource	*data_req;

	int		irq_wake;

	struct mutex	addr_lock;	/* phy and eeprom access lock */

	struct delayed_work phy_poll;
	struct net_device  *ndev;

	spinlock_t	lock;

	struct mii_if_info mii;
	u32		msg_enable;
	u32		wake_state;

	int		ip_summed;

	struct regulator *power_supply;
};

/* debug code */

#define dm9000_dbg(db, lev, msg...) do {		\
	if ((lev) < debug) {				\
		dev_dbg(db->dev, msg);			\
	}						\
} while (0)

static inline struct board_info *to_dm9000_board(struct net_device *dev)
{
	return netdev_priv(dev);
}

/* DM9000 network board routines */

/* Read a byte from the chip: select the register through the address
 * port, then read it back through the data port.
 */
static u8
ior(struct board_info *db, int reg)
{
	writeb(reg, db->io_addr);
	return readb(db->io_data);
}

/* Write a byte to the chip: select the register through the address
 * port, then write the value through the data port.
 */
static void
iow(struct board_info *db, int reg, int value)
{
	writeb(reg, db->io_addr);
	writeb(value, db->io_data);
}

static void
dm9000_reset(struct board_info *db)
{
	dev_dbg(db->dev, "resetting device\n");

	/* Reset the DM9000 twice, with MAC internal loopback (NCR_MAC_LBK)
	 * selected during the reset, as a single reset is not reliable on
	 * all chips.
	 */
	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
	udelay(100);	/* allow the reset to complete */
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to first reset\n");

	iow(db, DM9000_NCR, 0);
	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
	udelay(100);
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to second reset\n");
}

/* routines for sending a block of data to the chip */

static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
	iowrite8_rep(reg, data, count);
}

static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
	iowrite16_rep(reg, data, (count+1) >> 1);
}

static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
	iowrite32_rep(reg, data, (count+3) >> 2);
}

/* routines for reading a block of data from the chip into memory */

static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
	ioread8_rep(reg, data, count);
}

static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
	ioread16_rep(reg, data, (count+1) >> 1);
}

static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
	ioread32_rep(reg, data, (count+3) >> 2);
}

/* routines for reading and discarding a block of data from the chip */

static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
{
	int i;

	for (i = 0; i < count; i++)
		readb(reg);
}

static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
{
	int i;

	count = (count + 1) >> 1;

	for (i = 0; i < count; i++)
		readw(reg);
}

static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
{
	int i;

	count = (count + 3) >> 2;

	for (i = 0; i < count; i++)
		readl(reg);
}

/* Sleep for the given number of milliseconds, busy-waiting when called
 * from suspend or tx-timeout context where sleeping is not allowed.
 */
static void dm9000_msleep(struct board_info *db, unsigned int ms)
{
	if (db->in_suspend || db->in_timeout)
		mdelay(ms);
	else
		msleep(ms);
}

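/* Read a word from the PHY through the chip's EEPROM/PHY access
 * registers (EPAR/EPCR/EPDRL/EPDRH), saving and restoring the currently
 * selected address register around each locked access.
 */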
275static int
276dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
277{
278 struct board_info *db = netdev_priv(dev);
279 unsigned long flags;
280 unsigned int reg_save;
281 int ret;
282
283 mutex_lock(&db->addr_lock);
284
285 spin_lock_irqsave(&db->lock, flags);
286
287
288 reg_save = readb(db->io_addr);
289
290
291 iow(db, DM9000_EPAR, DM9000_PHY | reg);
292
293
294 iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
295
296 writeb(reg_save, db->io_addr);
297 spin_unlock_irqrestore(&db->lock, flags);
298
299 dm9000_msleep(db, 1);
300
301 spin_lock_irqsave(&db->lock, flags);
302 reg_save = readb(db->io_addr);
303
304 iow(db, DM9000_EPCR, 0x0);
305
306
307 ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
308
309
310 writeb(reg_save, db->io_addr);
311 spin_unlock_irqrestore(&db->lock, flags);
312
313 mutex_unlock(&db->addr_lock);
314
315 dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
316 return ret;
317}
318
319
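/* Write a word to the PHY through the EEPROM/PHY access registers.
 * The locking and address-register save/restore mirror dm9000_phy_read();
 * the addr_lock mutex is skipped when called from the tx-timeout path.
 */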
320static void
321dm9000_phy_write(struct net_device *dev,
322 int phyaddr_unused, int reg, int value)
323{
324 struct board_info *db = netdev_priv(dev);
325 unsigned long flags;
326 unsigned long reg_save;
327
328 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
329 if (!db->in_timeout)
330 mutex_lock(&db->addr_lock);
331
332 spin_lock_irqsave(&db->lock, flags);
333
334
335 reg_save = readb(db->io_addr);
336
337
338 iow(db, DM9000_EPAR, DM9000_PHY | reg);
339
340
341 iow(db, DM9000_EPDRL, value);
342 iow(db, DM9000_EPDRH, value >> 8);
343
344
345 iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
346
347 writeb(reg_save, db->io_addr);
348 spin_unlock_irqrestore(&db->lock, flags);
349
350 dm9000_msleep(db, 1);
351
352 spin_lock_irqsave(&db->lock, flags);
353 reg_save = readb(db->io_addr);
354
355 iow(db, DM9000_EPCR, 0x0);
356
357
358 writeb(reg_save, db->io_addr);
359
360 spin_unlock_irqrestore(&db->lock, flags);
361 if (!db->in_timeout)
362 mutex_unlock(&db->addr_lock);
363}
364
365
366
367
368
369
370
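/* Select the block I/O routines (8/16/32-bit) to match the width of the
 * data bus the chip is wired to.
 */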
371static void dm9000_set_io(struct board_info *db, int byte_width)
372{
373
374
375
376
377 switch (byte_width) {
378 case 1:
379 db->dumpblk = dm9000_dumpblk_8bit;
380 db->outblk = dm9000_outblk_8bit;
381 db->inblk = dm9000_inblk_8bit;
382 break;
383
384
385 case 3:
386 dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
387 fallthrough;
388 case 2:
389 db->dumpblk = dm9000_dumpblk_16bit;
390 db->outblk = dm9000_outblk_16bit;
391 db->inblk = dm9000_inblk_16bit;
392 break;
393
394 case 4:
395 default:
396 db->dumpblk = dm9000_dumpblk_32bit;
397 db->outblk = dm9000_outblk_32bit;
398 db->inblk = dm9000_inblk_32bit;
399 break;
400 }
401}
402
403static void dm9000_schedule_poll(struct board_info *db)
404{
405 if (db->type == TYPE_DM9000E)
406 schedule_delayed_work(&db->phy_poll, HZ * 2);
407}
408
409static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
410{
411 struct board_info *dm = to_dm9000_board(dev);
412
413 if (!netif_running(dev))
414 return -EINVAL;
415
416 return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
417}
418
419static unsigned int
420dm9000_read_locked(struct board_info *db, int reg)
421{
422 unsigned long flags;
423 unsigned int ret;
424
425 spin_lock_irqsave(&db->lock, flags);
426 ret = ior(db, reg);
427 spin_unlock_irqrestore(&db->lock, flags);
428
429 return ret;
430}
431
432static int dm9000_wait_eeprom(struct board_info *db)
433{
434 unsigned int status;
435 int timeout = 8;

	/* Wait for the EEPROM/PHY access unit to go idle by polling the
	 * ERRE bit in EPCR. The wait is bounded by a timeout, and expiry
	 * is not treated as a fatal error.
	 */
448 while (1) {
449 status = dm9000_read_locked(db, DM9000_EPCR);
450
451 if ((status & EPCR_ERRE) == 0)
452 break;
453
454 msleep(1);
455
456 if (timeout-- < 0) {
457 dev_dbg(db->dev, "timeout waiting EEPROM\n");
458 break;
459 }
460 }
461
462 return 0;
463}
464
465
466
467
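/* Read one 16-bit word from the external EEPROM into to[0]/to[1];
 * fills the buffer with 0xff if the platform declares no EEPROM.
 */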
468static void
469dm9000_read_eeprom(struct board_info *db, int offset, u8 *to)
470{
471 unsigned long flags;
472
473 if (db->flags & DM9000_PLATF_NO_EEPROM) {
474 to[0] = 0xff;
475 to[1] = 0xff;
476 return;
477 }
478
479 mutex_lock(&db->addr_lock);
480
481 spin_lock_irqsave(&db->lock, flags);
482
483 iow(db, DM9000_EPAR, offset);
484 iow(db, DM9000_EPCR, EPCR_ERPRR);
485
486 spin_unlock_irqrestore(&db->lock, flags);
487
488 dm9000_wait_eeprom(db);
489
490
491 msleep(1);
492
493 spin_lock_irqsave(&db->lock, flags);
494
495 iow(db, DM9000_EPCR, 0x0);
496
497 to[0] = ior(db, DM9000_EPDRL);
498 to[1] = ior(db, DM9000_EPDRH);
499
500 spin_unlock_irqrestore(&db->lock, flags);
501
502 mutex_unlock(&db->addr_lock);
503}
504
505
506
507
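/* Write one 16-bit word (data[0]/data[1]) to the external EEPROM. */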
508static void
509dm9000_write_eeprom(struct board_info *db, int offset, u8 *data)
510{
511 unsigned long flags;
512
513 if (db->flags & DM9000_PLATF_NO_EEPROM)
514 return;
515
516 mutex_lock(&db->addr_lock);
517
518 spin_lock_irqsave(&db->lock, flags);
519 iow(db, DM9000_EPAR, offset);
520 iow(db, DM9000_EPDRH, data[1]);
521 iow(db, DM9000_EPDRL, data[0]);
522 iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
523 spin_unlock_irqrestore(&db->lock, flags);
524
525 dm9000_wait_eeprom(db);
526
527 mdelay(1);
528
529 spin_lock_irqsave(&db->lock, flags);
530 iow(db, DM9000_EPCR, 0);
531 spin_unlock_irqrestore(&db->lock, flags);
532
533 mutex_unlock(&db->addr_lock);
534}
535
536
537
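/* ethtool ops */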
538static void dm9000_get_drvinfo(struct net_device *dev,
539 struct ethtool_drvinfo *info)
540{
541 struct board_info *dm = to_dm9000_board(dev);
542
543 strlcpy(info->driver, CARDNAME, sizeof(info->driver));
544 strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
545 sizeof(info->bus_info));
546}
547
548static u32 dm9000_get_msglevel(struct net_device *dev)
549{
550 struct board_info *dm = to_dm9000_board(dev);
551
552 return dm->msg_enable;
553}
554
555static void dm9000_set_msglevel(struct net_device *dev, u32 value)
556{
557 struct board_info *dm = to_dm9000_board(dev);
558
559 dm->msg_enable = value;
560}
561
562static int dm9000_get_link_ksettings(struct net_device *dev,
563 struct ethtool_link_ksettings *cmd)
564{
565 struct board_info *dm = to_dm9000_board(dev);
566
567 mii_ethtool_get_link_ksettings(&dm->mii, cmd);
568 return 0;
569}
570
571static int dm9000_set_link_ksettings(struct net_device *dev,
572 const struct ethtool_link_ksettings *cmd)
573{
574 struct board_info *dm = to_dm9000_board(dev);
575
576 return mii_ethtool_set_link_ksettings(&dm->mii, cmd);
577}
578
579static int dm9000_nway_reset(struct net_device *dev)
580{
581 struct board_info *dm = to_dm9000_board(dev);
582 return mii_nway_restart(&dm->mii);
583}
584
585static int dm9000_set_features(struct net_device *dev,
586 netdev_features_t features)
587{
588 struct board_info *dm = to_dm9000_board(dev);
589 netdev_features_t changed = dev->features ^ features;
590 unsigned long flags;
591
592 if (!(changed & NETIF_F_RXCSUM))
593 return 0;
594
595 spin_lock_irqsave(&dm->lock, flags);
596 iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
597 spin_unlock_irqrestore(&dm->lock, flags);
598
599 return 0;
600}
601
602static u32 dm9000_get_link(struct net_device *dev)
603{
604 struct board_info *dm = to_dm9000_board(dev);
605 u32 ret;
606
607 if (dm->flags & DM9000_PLATF_EXT_PHY)
608 ret = mii_link_ok(&dm->mii);
609 else
610 ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
611
612 return ret;
613}
614
615#define DM_EEPROM_MAGIC (0x444D394B)
616
617static int dm9000_get_eeprom_len(struct net_device *dev)
618{
619 return 128;
620}
621
622static int dm9000_get_eeprom(struct net_device *dev,
623 struct ethtool_eeprom *ee, u8 *data)
624{
625 struct board_info *dm = to_dm9000_board(dev);
626 int offset = ee->offset;
627 int len = ee->len;
628 int i;
629
630
631
632 if ((len & 1) != 0 || (offset & 1) != 0)
633 return -EINVAL;
634
635 if (dm->flags & DM9000_PLATF_NO_EEPROM)
636 return -ENOENT;
637
638 ee->magic = DM_EEPROM_MAGIC;
639
640 for (i = 0; i < len; i += 2)
641 dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
642
643 return 0;
644}
645
646static int dm9000_set_eeprom(struct net_device *dev,
647 struct ethtool_eeprom *ee, u8 *data)
648{
649 struct board_info *dm = to_dm9000_board(dev);
650 int offset = ee->offset;
651 int len = ee->len;
652 int done;
653
654
655
656 if (dm->flags & DM9000_PLATF_NO_EEPROM)
657 return -ENOENT;
658
659 if (ee->magic != DM_EEPROM_MAGIC)
660 return -EINVAL;
661
662 while (len > 0) {
663 if (len & 1 || offset & 1) {
664 int which = offset & 1;
665 u8 tmp[2];
666
667 dm9000_read_eeprom(dm, offset / 2, tmp);
668 tmp[which] = *data;
669 dm9000_write_eeprom(dm, offset / 2, tmp);
670
671 done = 1;
672 } else {
673 dm9000_write_eeprom(dm, offset / 2, data);
674 done = 2;
675 }
676
677 data += done;
678 offset += done;
679 len -= done;
680 }
681
682 return 0;
683}
684
685static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
686{
687 struct board_info *dm = to_dm9000_board(dev);
688
689 memset(w, 0, sizeof(struct ethtool_wolinfo));
690
691
692 w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
693 w->wolopts = dm->wake_state;
694}
695
696static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
697{
698 struct board_info *dm = to_dm9000_board(dev);
699 unsigned long flags;
700 u32 opts = w->wolopts;
701 u32 wcr = 0;
702
703 if (!dm->wake_supported)
704 return -EOPNOTSUPP;
705
706 if (opts & ~WAKE_MAGIC)
707 return -EINVAL;
708
709 if (opts & WAKE_MAGIC)
710 wcr |= WCR_MAGICEN;
711
712 mutex_lock(&dm->addr_lock);
713
714 spin_lock_irqsave(&dm->lock, flags);
715 iow(dm, DM9000_WCR, wcr);
716 spin_unlock_irqrestore(&dm->lock, flags);
717
718 mutex_unlock(&dm->addr_lock);
719
720 if (dm->wake_state != opts) {
721
722
723 if (!dm->wake_state)
724 irq_set_irq_wake(dm->irq_wake, 1);
725 else if (dm->wake_state && !opts)
726 irq_set_irq_wake(dm->irq_wake, 0);
727 }
728
729 dm->wake_state = opts;
730 return 0;
731}
732
733static const struct ethtool_ops dm9000_ethtool_ops = {
734 .get_drvinfo = dm9000_get_drvinfo,
735 .get_msglevel = dm9000_get_msglevel,
736 .set_msglevel = dm9000_set_msglevel,
737 .nway_reset = dm9000_nway_reset,
738 .get_link = dm9000_get_link,
739 .get_wol = dm9000_get_wol,
740 .set_wol = dm9000_set_wol,
741 .get_eeprom_len = dm9000_get_eeprom_len,
742 .get_eeprom = dm9000_get_eeprom,
743 .set_eeprom = dm9000_set_eeprom,
744 .get_link_ksettings = dm9000_get_link_ksettings,
745 .set_link_ksettings = dm9000_set_link_ksettings,
746};
747
748static void dm9000_show_carrier(struct board_info *db,
749 unsigned carrier, unsigned nsr)
750{
751 int lpa;
752 struct net_device *ndev = db->ndev;
753 struct mii_if_info *mii = &db->mii;
754 unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
755
756 if (carrier) {
757 lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
758 dev_info(db->dev,
759 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
760 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
761 (ncr & NCR_FDX) ? "full" : "half", lpa);
762 } else {
763 dev_info(db->dev, "%s: link down\n", ndev->name);
764 }
765}
766
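/* Periodic work item that polls the PHY/link state and updates the
 * carrier status of the net_device.
 */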
767static void
768dm9000_poll_work(struct work_struct *w)
769{
770 struct delayed_work *dw = to_delayed_work(w);
771 struct board_info *db = container_of(dw, struct board_info, phy_poll);
772 struct net_device *ndev = db->ndev;
773
774 if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
775 !(db->flags & DM9000_PLATF_EXT_PHY)) {
776 unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
777 unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
778 unsigned new_carrier;
779
780 new_carrier = (nsr & NSR_LINKST) ? 1 : 0;
781
782 if (old_carrier != new_carrier) {
783 if (netif_msg_link(db))
784 dm9000_show_carrier(db, new_carrier, nsr);
785
786 if (!new_carrier)
787 netif_carrier_off(ndev);
788 else
789 netif_carrier_on(ndev);
790 }
791 } else
792 mii_check_media(&db->mii, netif_msg_link(db), 0);
793
794 if (netif_running(ndev))
795 dm9000_schedule_poll(db);
796}
797
798
799
800
801
802
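/* Release the I/O mappings and memory regions claimed during probe. */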
803static void
804dm9000_release_board(struct platform_device *pdev, struct board_info *db)
805{
806
807
808 iounmap(db->io_addr);
809 iounmap(db->io_data);
810
811
812
813 if (db->data_req)
814 release_resource(db->data_req);
815 kfree(db->data_req);
816
817 if (db->addr_req)
818 release_resource(db->addr_req);
819 kfree(db->addr_req);
820}
821
822static unsigned char dm9000_type_to_char(enum dm9000_type type)
823{
824 switch (type) {
825 case TYPE_DM9000E: return 'e';
826 case TYPE_DM9000A: return 'a';
827 case TYPE_DM9000B: return 'b';
828 }
829
830 return '?';
831}
832
833
834
835
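/* Program the MAC address, multicast hash table and RX control register
 * (promiscuous/all-multicast bits). Caller must hold db->lock.
 */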
836static void
837dm9000_hash_table_unlocked(struct net_device *dev)
838{
839 struct board_info *db = netdev_priv(dev);
840 struct netdev_hw_addr *ha;
841 int i, oft;
842 u32 hash_val;
843 u16 hash_table[4] = { 0, 0, 0, 0x8000 };
844 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
845
846 dm9000_dbg(db, 1, "entering %s\n", __func__);
847
848 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
849 iow(db, oft, dev->dev_addr[i]);
850
851 if (dev->flags & IFF_PROMISC)
852 rcr |= RCR_PRMSC;
853
854 if (dev->flags & IFF_ALLMULTI)
855 rcr |= RCR_ALL;
856
857
858 netdev_for_each_mc_addr(ha, dev) {
859 hash_val = ether_crc_le(6, ha->addr) & 0x3f;
860 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
861 }
862
863
864 for (i = 0, oft = DM9000_MAR; i < 4; i++) {
865 iow(db, oft++, hash_table[i]);
866 iow(db, oft++, hash_table[i] >> 8);
867 }
868
869 iow(db, DM9000_RCR, rcr);
870}
871
872static void
873dm9000_hash_table(struct net_device *dev)
874{
875 struct board_info *db = netdev_priv(dev);
876 unsigned long flags;
877
878 spin_lock_irqsave(&db->lock, flags);
879 dm9000_hash_table_unlocked(dev);
880 spin_unlock_irqrestore(&db->lock, flags);
881}
882
883static void
884dm9000_mask_interrupts(struct board_info *db)
885{
886 iow(db, DM9000_IMR, IMR_PAR);
887}
888
889static void
890dm9000_unmask_interrupts(struct board_info *db)
891{
892 iow(db, DM9000_IMR, db->imr_all);
893}
894
895
896
897
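/* Bring the chip to a known working state: reset, detect the I/O mode,
 * program checksum offload, PHY/GPIO power, RX/TX parameters and the
 * interrupt mask value.
 */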
898static void
899dm9000_init_dm9000(struct net_device *dev)
900{
901 struct board_info *db = netdev_priv(dev);
902 unsigned int imr;
903 unsigned int ncr;
904
905 dm9000_dbg(db, 1, "entering %s\n", __func__);
906
907 dm9000_reset(db);
908 dm9000_mask_interrupts(db);
909
910
911 db->io_mode = ior(db, DM9000_ISR) >> 6;
912
913
914 if (dev->hw_features & NETIF_F_RXCSUM)
915 iow(db, DM9000_RCSR,
916 (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
917
918 iow(db, DM9000_GPCR, GPCR_GEP_CNTL);
919 iow(db, DM9000_GPR, 0);
920
921
922
923
924 if (db->type == TYPE_DM9000B) {
925 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
926 dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
927 }
928
929 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
930
931
932
933
934 if (db->wake_supported)
935 ncr |= NCR_WAKEEN;
936
937 iow(db, DM9000_NCR, ncr);
938
939
940 iow(db, DM9000_TCR, 0);
941 iow(db, DM9000_BPTR, 0x3f);
942 iow(db, DM9000_FCR, 0xff);
943 iow(db, DM9000_SMCR, 0);
944
945 iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
946 iow(db, DM9000_ISR, ISR_CLR_STATUS);
947
948
949 dm9000_hash_table_unlocked(dev);
950
951 imr = IMR_PAR | IMR_PTM | IMR_PRM;
952 if (db->type != TYPE_DM9000E)
953 imr |= IMR_LNKCHNG;
954
955 db->imr_all = imr;
956
957
958 db->tx_pkt_cnt = 0;
959 db->queue_pkt_len = 0;
960 netif_trans_update(dev);
961}
962
963
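/* Transmit-timeout handler: reinitialize the chip and restart the queue. */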
964static void dm9000_timeout(struct net_device *dev, unsigned int txqueue)
965{
966 struct board_info *db = netdev_priv(dev);
967 u8 reg_save;
968 unsigned long flags;
969
970
971 spin_lock_irqsave(&db->lock, flags);
972 db->in_timeout = 1;
973 reg_save = readb(db->io_addr);
974
975 netif_stop_queue(dev);
976 dm9000_init_dm9000(dev);
977 dm9000_unmask_interrupts(db);
978
979 netif_trans_update(dev);
980 netif_wake_queue(dev);
981
982
983 writeb(reg_save, db->io_addr);
984 db->in_timeout = 0;
985 spin_unlock_irqrestore(&db->lock, flags);
986}
987
988static void dm9000_send_packet(struct net_device *dev,
989 int ip_summed,
990 u16 pkt_len)
991{
992 struct board_info *dm = to_dm9000_board(dev);
993
994
995 if (dm->ip_summed != ip_summed) {
996 if (ip_summed == CHECKSUM_NONE)
997 iow(dm, DM9000_TCCR, 0);
998 else
999 iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
1000 dm->ip_summed = ip_summed;
1001 }
1002
1003
1004 iow(dm, DM9000_TXPLL, pkt_len);
1005 iow(dm, DM9000_TXPLH, pkt_len >> 8);
1006
1007
1008 iow(dm, DM9000_TCR, TCR_TXREQ);
1009}
1010
1011
1012
1013
1014
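/* Hard transmit: copy the packet into the chip's TX SRAM. The first
 * pending packet is sent immediately; a second one is queued and the
 * net queue stopped until a TX-complete interrupt arrives.
 */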
1015static int
1016dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
1017{
1018 unsigned long flags;
1019 struct board_info *db = netdev_priv(dev);
1020
1021 dm9000_dbg(db, 3, "%s:\n", __func__);
1022
1023 if (db->tx_pkt_cnt > 1)
1024 return NETDEV_TX_BUSY;
1025
1026 spin_lock_irqsave(&db->lock, flags);
1027
1028
1029 writeb(DM9000_MWCMD, db->io_addr);
1030
1031 (db->outblk)(db->io_data, skb->data, skb->len);
1032 dev->stats.tx_bytes += skb->len;
1033
1034 db->tx_pkt_cnt++;
1035
1036 if (db->tx_pkt_cnt == 1) {
1037 dm9000_send_packet(dev, skb->ip_summed, skb->len);
1038 } else {
1039
1040 db->queue_pkt_len = skb->len;
1041 db->queue_ip_summed = skb->ip_summed;
1042 netif_stop_queue(dev);
1043 }
1044
1045 spin_unlock_irqrestore(&db->lock, flags);
1046
1047
1048 dev_consume_skb_any(skb);
1049
1050 return NETDEV_TX_OK;
1051}
1052
1053
1054
1055
1056
1057
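/* TX-complete handling: account for finished packets, send any queued
 * packet and wake the transmit queue.
 */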
1058static void dm9000_tx_done(struct net_device *dev, struct board_info *db)
1059{
1060 int tx_status = ior(db, DM9000_NSR);
1061
1062 if (tx_status & (NSR_TX2END | NSR_TX1END)) {
1063
1064 db->tx_pkt_cnt--;
1065 dev->stats.tx_packets++;
1066
1067 if (netif_msg_tx_done(db))
1068 dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
1069
1070
1071 if (db->tx_pkt_cnt > 0)
1072 dm9000_send_packet(dev, db->queue_ip_summed,
1073 db->queue_pkt_len);
1074 netif_wake_queue(dev);
1075 }
1076}
1077
1078struct dm9000_rxhdr {
1079 u8 RxPktReady;
1080 u8 RxStatus;
1081 __le16 RxLen;
1082} __packed;
1083
1084
1085
1086
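/* Receive loop: while the chip reports a packet ready, read the 4-byte
 * RX header, validate length and status, then copy the frame into an skb
 * or discard it on error.
 */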
1087static void
1088dm9000_rx(struct net_device *dev)
1089{
1090 struct board_info *db = netdev_priv(dev);
1091 struct dm9000_rxhdr rxhdr;
1092 struct sk_buff *skb;
1093 u8 rxbyte, *rdptr;
1094 bool GoodPacket;
1095 int RxLen;
1096
1097
1098 do {
1099 ior(db, DM9000_MRCMDX);
1100
1101
1102 rxbyte = readb(db->io_data);
1103
1104
1105 if (rxbyte & DM9000_PKT_ERR) {
1106 dev_warn(db->dev, "status check fail: %d\n", rxbyte);
1107 iow(db, DM9000_RCR, 0x00);
1108 return;
1109 }
1110
1111 if (!(rxbyte & DM9000_PKT_RDY))
1112 return;
1113
1114
1115 GoodPacket = true;
1116 writeb(DM9000_MRCMD, db->io_addr);
1117
1118 (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
1119
1120 RxLen = le16_to_cpu(rxhdr.RxLen);
1121
1122 if (netif_msg_rx_status(db))
1123 dev_dbg(db->dev, "RX: status %02x, length %04x\n",
1124 rxhdr.RxStatus, RxLen);
1125
1126
1127 if (RxLen < 0x40) {
1128 GoodPacket = false;
1129 if (netif_msg_rx_err(db))
1130 dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
1131 }
1132
1133 if (RxLen > DM9000_PKT_MAX) {
1134 dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
1135 }
1136
1137
1138 if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
1139 RSR_PLE | RSR_RWTO |
1140 RSR_LCS | RSR_RF)) {
1141 GoodPacket = false;
1142 if (rxhdr.RxStatus & RSR_FOE) {
1143 if (netif_msg_rx_err(db))
1144 dev_dbg(db->dev, "fifo error\n");
1145 dev->stats.rx_fifo_errors++;
1146 }
1147 if (rxhdr.RxStatus & RSR_CE) {
1148 if (netif_msg_rx_err(db))
1149 dev_dbg(db->dev, "crc error\n");
1150 dev->stats.rx_crc_errors++;
1151 }
1152 if (rxhdr.RxStatus & RSR_RF) {
1153 if (netif_msg_rx_err(db))
1154 dev_dbg(db->dev, "length error\n");
1155 dev->stats.rx_length_errors++;
1156 }
1157 }
1158
1159
1160 if (GoodPacket &&
1161 ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
1162 skb_reserve(skb, 2);
1163 rdptr = skb_put(skb, RxLen - 4);
1164
1165
1166
1167 (db->inblk)(db->io_data, rdptr, RxLen);
1168 dev->stats.rx_bytes += RxLen;
1169
1170
1171 skb->protocol = eth_type_trans(skb, dev);
1172 if (dev->features & NETIF_F_RXCSUM) {
1173 if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
1174 skb->ip_summed = CHECKSUM_UNNECESSARY;
1175 else
1176 skb_checksum_none_assert(skb);
1177 }
1178 netif_rx(skb);
1179 dev->stats.rx_packets++;
1180
1181 } else {
1182
1183
1184 (db->dumpblk)(db->io_data, RxLen);
1185 }
1186 } while (rxbyte & DM9000_PKT_RDY);
1187}
1188
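/* Main interrupt handler: save the selected register, mask interrupts,
 * service RX, TX-done and link-change events, then restore state.
 */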
1189static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1190{
1191 struct net_device *dev = dev_id;
1192 struct board_info *db = netdev_priv(dev);
1193 int int_status;
1194 unsigned long flags;
1195 u8 reg_save;
1196
1197 dm9000_dbg(db, 3, "entering %s\n", __func__);
1198
1199
1200
1201
1202 spin_lock_irqsave(&db->lock, flags);
1203
1204
1205 reg_save = readb(db->io_addr);
1206
1207 dm9000_mask_interrupts(db);
1208
1209 int_status = ior(db, DM9000_ISR);
1210 iow(db, DM9000_ISR, int_status);
1211
1212 if (netif_msg_intr(db))
1213 dev_dbg(db->dev, "interrupt status %02x\n", int_status);
1214
1215
1216 if (int_status & ISR_PRS)
1217 dm9000_rx(dev);
1218
1219
1220 if (int_status & ISR_PTS)
1221 dm9000_tx_done(dev, db);
1222
1223 if (db->type != TYPE_DM9000E) {
1224 if (int_status & ISR_LNKCHNG) {
1225
1226 schedule_delayed_work(&db->phy_poll, 1);
1227 }
1228 }
1229
1230 dm9000_unmask_interrupts(db);
1231
1232 writeb(reg_save, db->io_addr);
1233
1234 spin_unlock_irqrestore(&db->lock, flags);
1235
1236 return IRQ_HANDLED;
1237}
1238
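/* Handler for the optional wake-on-LAN interrupt line; reports the wake
 * reason recorded in the WCR register.
 */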
1239static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
1240{
1241 struct net_device *dev = dev_id;
1242 struct board_info *db = netdev_priv(dev);
1243 unsigned long flags;
1244 unsigned nsr, wcr;
1245
1246 spin_lock_irqsave(&db->lock, flags);
1247
1248 nsr = ior(db, DM9000_NSR);
1249 wcr = ior(db, DM9000_WCR);
1250
1251 dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
1252
1253 if (nsr & NSR_WAKEST) {
1254
1255 iow(db, DM9000_NSR, NSR_WAKEST);
1256
1257 if (wcr & WCR_LINKST)
1258 dev_info(db->dev, "wake by link status change\n");
1259 if (wcr & WCR_SAMPLEST)
1260 dev_info(db->dev, "wake by sample packet\n");
1261 if (wcr & WCR_MAGICST)
1262 dev_info(db->dev, "wake by magic packet\n");
1263 if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
1264 dev_err(db->dev, "wake signalled with no reason? "
1265 "NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
1266 }
1267
1268 spin_unlock_irqrestore(&db->lock, flags);
1269
1270 return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
1271}
1272
1273#ifdef CONFIG_NET_POLL_CONTROLLER
1274
1275
1276
1277static void dm9000_poll_controller(struct net_device *dev)
1278{
1279 disable_irq(dev->irq);
1280 dm9000_interrupt(dev->irq, dev);
1281 enable_irq(dev->irq);
1282}
1283#endif
1284
1285
1286
1287
1288
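/* Open the interface: power up the internal PHY, initialize the chip,
 * request the interrupt and start the transmit queue.
 */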
1289static int
1290dm9000_open(struct net_device *dev)
1291{
1292 struct board_info *db = netdev_priv(dev);
1293 unsigned int irq_flags = irq_get_trigger_type(dev->irq);
1294
1295 if (netif_msg_ifup(db))
1296 dev_dbg(db->dev, "enabling %s\n", dev->name);
1297
1298
1299
1300
1301 if (irq_flags == IRQF_TRIGGER_NONE)
1302 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1303
1304 irq_flags |= IRQF_SHARED;
1305
1306
1307 iow(db, DM9000_GPR, 0);
1308 mdelay(1);
1309
1310
1311 dm9000_init_dm9000(dev);
1312
1313 if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
1314 return -EAGAIN;
1315
1316
1317
1318 dm9000_unmask_interrupts(db);
1319
1320
1321 db->dbug_cnt = 0;
1322
1323 mii_check_media(&db->mii, netif_msg_link(db), 1);
1324 netif_start_queue(dev);
1325
1326
1327 schedule_delayed_work(&db->phy_poll, 1);
1328
1329 return 0;
1330}
1331
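/* Quiesce the hardware: reset and power down the PHY, mask interrupts
 * and disable the receiver.
 */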
1332static void
1333dm9000_shutdown(struct net_device *dev)
1334{
1335 struct board_info *db = netdev_priv(dev);
1336
1337
1338 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
1339 iow(db, DM9000_GPR, 0x01);
1340 dm9000_mask_interrupts(db);
1341 iow(db, DM9000_RCR, 0x00);
1342}
1343
1344
1345
1346
1347
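/* Stop the interface: cancel the PHY poll, stop the queue, free the IRQ
 * and shut the chip down.
 */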
1348static int
1349dm9000_stop(struct net_device *ndev)
1350{
1351 struct board_info *db = netdev_priv(ndev);
1352
1353 if (netif_msg_ifdown(db))
1354 dev_dbg(db->dev, "shutting down %s\n", ndev->name);
1355
1356 cancel_delayed_work_sync(&db->phy_poll);
1357
1358 netif_stop_queue(ndev);
1359 netif_carrier_off(ndev);
1360
1361
1362 free_irq(ndev->irq, ndev);
1363
1364 dm9000_shutdown(ndev);
1365
1366 return 0;
1367}
1368
1369static const struct net_device_ops dm9000_netdev_ops = {
1370 .ndo_open = dm9000_open,
1371 .ndo_stop = dm9000_stop,
1372 .ndo_start_xmit = dm9000_start_xmit,
1373 .ndo_tx_timeout = dm9000_timeout,
1374 .ndo_set_rx_mode = dm9000_hash_table,
1375 .ndo_do_ioctl = dm9000_ioctl,
1376 .ndo_set_features = dm9000_set_features,
1377 .ndo_validate_addr = eth_validate_addr,
1378 .ndo_set_mac_address = eth_mac_addr,
1379#ifdef CONFIG_NET_POLL_CONTROLLER
1380 .ndo_poll_controller = dm9000_poll_controller,
1381#endif
1382};
1383
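/* Build platform data from device-tree properties when no platform data
 * was supplied by the board code.
 */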
1384static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
1385{
1386 struct dm9000_plat_data *pdata;
1387 struct device_node *np = dev->of_node;
1388 int ret;
1389
1390 if (!IS_ENABLED(CONFIG_OF) || !np)
1391 return ERR_PTR(-ENXIO);
1392
1393 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1394 if (!pdata)
1395 return ERR_PTR(-ENOMEM);
1396
1397 if (of_find_property(np, "davicom,ext-phy", NULL))
1398 pdata->flags |= DM9000_PLATF_EXT_PHY;
1399 if (of_find_property(np, "davicom,no-eeprom", NULL))
1400 pdata->flags |= DM9000_PLATF_NO_EEPROM;
1401
1402 ret = of_get_mac_address(np, pdata->dev_addr);
1403 if (ret == -EPROBE_DEFER)
1404 return ERR_PTR(ret);
1405
1406 return pdata;
1407}
1408
1409
1410
1411
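/* Probe: acquire the regulator and reset GPIO, map the address and data
 * resources, verify the chip ID, determine the MAC address and register
 * the network device.
 */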
1412static int
1413dm9000_probe(struct platform_device *pdev)
1414{
1415 struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
1416 struct board_info *db;
1417 struct net_device *ndev;
1418 struct device *dev = &pdev->dev;
1419 const unsigned char *mac_src;
1420 int ret = 0;
1421 int iosize;
1422 int i;
1423 u32 id_val;
1424 int reset_gpios;
1425 enum of_gpio_flags flags;
1426 struct regulator *power;
1427 bool inv_mac_addr = false;
1428
1429 power = devm_regulator_get(dev, "vcc");
1430 if (IS_ERR(power)) {
1431 if (PTR_ERR(power) == -EPROBE_DEFER)
1432 return -EPROBE_DEFER;
1433 dev_dbg(dev, "no regulator provided\n");
1434 } else {
1435 ret = regulator_enable(power);
1436 if (ret != 0) {
1437 dev_err(dev,
1438 "Failed to enable power regulator: %d\n", ret);
1439 return ret;
1440 }
1441 dev_dbg(dev, "regulator enabled\n");
1442 }
1443
1444 reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
1445 &flags);
1446 if (gpio_is_valid(reset_gpios)) {
1447 ret = devm_gpio_request_one(dev, reset_gpios, flags,
1448 "dm9000_reset");
1449 if (ret) {
1450 dev_err(dev, "failed to request reset gpio %d: %d\n",
1451 reset_gpios, ret);
1452 goto out_regulator_disable;
1453 }
1454
1455
1456 msleep(2);
1457 gpio_set_value(reset_gpios, 1);
1458
1459 msleep(4);
1460 }
1461
1462 if (!pdata) {
1463 pdata = dm9000_parse_dt(&pdev->dev);
1464 if (IS_ERR(pdata)) {
1465 ret = PTR_ERR(pdata);
1466 goto out_regulator_disable;
1467 }
1468 }
1469
1470
1471 ndev = alloc_etherdev(sizeof(struct board_info));
1472 if (!ndev) {
1473 ret = -ENOMEM;
1474 goto out_regulator_disable;
1475 }
1476
1477 SET_NETDEV_DEV(ndev, &pdev->dev);
1478
1479 dev_dbg(&pdev->dev, "dm9000_probe()\n");
1480
1481
1482 db = netdev_priv(ndev);
1483
1484 db->dev = &pdev->dev;
1485 db->ndev = ndev;
1486 if (!IS_ERR(power))
1487 db->power_supply = power;
1488
1489 spin_lock_init(&db->lock);
1490 mutex_init(&db->addr_lock);
1491
1492 INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
1493
1494 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1495 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1496
1497 if (!db->addr_res || !db->data_res) {
1498 dev_err(db->dev, "insufficient resources addr=%p data=%p\n",
1499 db->addr_res, db->data_res);
1500 ret = -ENOENT;
1501 goto out;
1502 }
1503
1504 ndev->irq = platform_get_irq(pdev, 0);
1505 if (ndev->irq < 0) {
1506 ret = ndev->irq;
1507 goto out;
1508 }
1509
1510 db->irq_wake = platform_get_irq_optional(pdev, 1);
1511 if (db->irq_wake >= 0) {
1512 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1513
1514 ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1515 IRQF_SHARED, dev_name(db->dev), ndev);
1516 if (ret) {
1517 dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1518 } else {
1519
1520
1521 ret = irq_set_irq_wake(db->irq_wake, 1);
1522 if (ret) {
1523 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1524 db->irq_wake, ret);
1525 } else {
1526 irq_set_irq_wake(db->irq_wake, 0);
1527 db->wake_supported = 1;
1528 }
1529 }
1530 }
1531
1532 iosize = resource_size(db->addr_res);
1533 db->addr_req = request_mem_region(db->addr_res->start, iosize,
1534 pdev->name);
1535
1536 if (db->addr_req == NULL) {
1537 dev_err(db->dev, "cannot claim address reg area\n");
1538 ret = -EIO;
1539 goto out;
1540 }
1541
1542 db->io_addr = ioremap(db->addr_res->start, iosize);
1543
1544 if (db->io_addr == NULL) {
1545 dev_err(db->dev, "failed to ioremap address reg\n");
1546 ret = -EINVAL;
1547 goto out;
1548 }
1549
1550 iosize = resource_size(db->data_res);
1551 db->data_req = request_mem_region(db->data_res->start, iosize,
1552 pdev->name);
1553
1554 if (db->data_req == NULL) {
1555 dev_err(db->dev, "cannot claim data reg area\n");
1556 ret = -EIO;
1557 goto out;
1558 }
1559
1560 db->io_data = ioremap(db->data_res->start, iosize);
1561
1562 if (db->io_data == NULL) {
1563 dev_err(db->dev, "failed to ioremap data reg\n");
1564 ret = -EINVAL;
1565 goto out;
1566 }
1567
1568
1569 ndev->base_addr = (unsigned long)db->io_addr;
1570
1571
1572 dm9000_set_io(db, iosize);
1573
1574
1575 if (pdata != NULL) {
1576
1577
1578
1579 if (pdata->flags & DM9000_PLATF_8BITONLY)
1580 dm9000_set_io(db, 1);
1581
1582 if (pdata->flags & DM9000_PLATF_16BITONLY)
1583 dm9000_set_io(db, 2);
1584
1585 if (pdata->flags & DM9000_PLATF_32BITONLY)
1586 dm9000_set_io(db, 4);
1587
1588
1589
1590
1591 if (pdata->inblk != NULL)
1592 db->inblk = pdata->inblk;
1593
1594 if (pdata->outblk != NULL)
1595 db->outblk = pdata->outblk;
1596
1597 if (pdata->dumpblk != NULL)
1598 db->dumpblk = pdata->dumpblk;
1599
1600 db->flags = pdata->flags;
1601 }
1602
1603#ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
1604 db->flags |= DM9000_PLATF_SIMPLE_PHY;
1605#endif
1606
1607 dm9000_reset(db);
1608
1609
1610 for (i = 0; i < 8; i++) {
1611 id_val = ior(db, DM9000_VIDL);
1612 id_val |= (u32)ior(db, DM9000_VIDH) << 8;
1613 id_val |= (u32)ior(db, DM9000_PIDL) << 16;
1614 id_val |= (u32)ior(db, DM9000_PIDH) << 24;
1615
1616 if (id_val == DM9000_ID)
1617 break;
1618 dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
1619 }
1620
1621 if (id_val != DM9000_ID) {
1622 dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
1623 ret = -ENODEV;
1624 goto out;
1625 }
1626
1627
1628
1629 id_val = ior(db, DM9000_CHIPR);
1630 dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
1631
1632 switch (id_val) {
1633 case CHIPR_DM9000A:
1634 db->type = TYPE_DM9000A;
1635 break;
1636 case CHIPR_DM9000B:
1637 db->type = TYPE_DM9000B;
1638 break;
1639 default:
1640 dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
1641 db->type = TYPE_DM9000E;
1642 }
1643
1644
1645 if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
1646 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
1647 ndev->features |= ndev->hw_features;
1648 }
1649
1650
1651
1652 ndev->netdev_ops = &dm9000_netdev_ops;
1653 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1654 ndev->ethtool_ops = &dm9000_ethtool_ops;
1655
1656 db->msg_enable = NETIF_MSG_LINK;
1657 db->mii.phy_id_mask = 0x1f;
1658 db->mii.reg_num_mask = 0x1f;
1659 db->mii.force_media = 0;
1660 db->mii.full_duplex = 0;
1661 db->mii.dev = ndev;
1662 db->mii.mdio_read = dm9000_phy_read;
1663 db->mii.mdio_write = dm9000_phy_write;
1664
1665 mac_src = "eeprom";
1666
1667
1668 for (i = 0; i < 6; i += 2)
1669 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
1670
1671 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1672 mac_src = "platform data";
1673 memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
1674 }
1675
1676 if (!is_valid_ether_addr(ndev->dev_addr)) {
1677
1678
1679 mac_src = "chip";
1680 for (i = 0; i < 6; i++)
1681 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1682 }
1683
1684 if (!is_valid_ether_addr(ndev->dev_addr)) {
1685 inv_mac_addr = true;
1686 eth_hw_addr_random(ndev);
1687 mac_src = "random";
1688 }
1689
1690
1691 platform_set_drvdata(pdev, ndev);
1692 ret = register_netdev(ndev);
1693
1694 if (ret == 0) {
1695 if (inv_mac_addr)
1696 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
1697 ndev->name);
1698 printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
1699 ndev->name, dm9000_type_to_char(db->type),
1700 db->io_addr, db->io_data, ndev->irq,
1701 ndev->dev_addr, mac_src);
1702 }
1703 return 0;
1704
1705out:
1706 dev_err(db->dev, "not found (%d).\n", ret);
1707
1708 dm9000_release_board(pdev, db);
1709 free_netdev(ndev);
1710
1711out_regulator_disable:
1712 if (!IS_ERR(power))
1713 regulator_disable(power);
1714
1715 return ret;
1716}
1717
1718static int
1719dm9000_drv_suspend(struct device *dev)
1720{
1721 struct net_device *ndev = dev_get_drvdata(dev);
1722 struct board_info *db;
1723
1724 if (ndev) {
1725 db = netdev_priv(ndev);
1726 db->in_suspend = 1;
1727
1728 if (!netif_running(ndev))
1729 return 0;
1730
1731 netif_device_detach(ndev);
1732
1733
1734 if (!db->wake_state)
1735 dm9000_shutdown(ndev);
1736 }
1737 return 0;
1738}
1739
1740static int
1741dm9000_drv_resume(struct device *dev)
1742{
1743 struct net_device *ndev = dev_get_drvdata(dev);
1744 struct board_info *db = netdev_priv(ndev);
1745
1746 if (ndev) {
1747 if (netif_running(ndev)) {
1748
1749
1750 if (!db->wake_state) {
1751 dm9000_init_dm9000(ndev);
1752 dm9000_unmask_interrupts(db);
1753 }
1754
1755 netif_device_attach(ndev);
1756 }
1757
1758 db->in_suspend = 0;
1759 }
1760 return 0;
1761}
1762
1763static const struct dev_pm_ops dm9000_drv_pm_ops = {
1764 .suspend = dm9000_drv_suspend,
1765 .resume = dm9000_drv_resume,
1766};
1767
1768static int
1769dm9000_drv_remove(struct platform_device *pdev)
1770{
1771 struct net_device *ndev = platform_get_drvdata(pdev);
1772 struct board_info *dm = to_dm9000_board(ndev);
1773
1774 unregister_netdev(ndev);
1775 dm9000_release_board(pdev, dm);
1776 free_netdev(ndev);
1777 if (dm->power_supply)
1778 regulator_disable(dm->power_supply);
1779
1780 dev_dbg(&pdev->dev, "released and freed device\n");
1781 return 0;
1782}
1783
1784#ifdef CONFIG_OF
1785static const struct of_device_id dm9000_of_matches[] = {
1786 { .compatible = "davicom,dm9000", },
1787 { }
1788};
1789MODULE_DEVICE_TABLE(of, dm9000_of_matches);
1790#endif
1791
1792static struct platform_driver dm9000_driver = {
1793 .driver = {
1794 .name = "dm9000",
1795 .pm = &dm9000_drv_pm_ops,
1796 .of_match_table = of_match_ptr(dm9000_of_matches),
1797 },
1798 .probe = dm9000_probe,
1799 .remove = dm9000_drv_remove,
1800};
1801
1802module_platform_driver(dm9000_driver);
1803
1804MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
1805MODULE_DESCRIPTION("Davicom DM9000 network driver");
1806MODULE_LICENSE("GPL");
1807MODULE_ALIAS("platform:dm9000");
1808