/*
 * VIA Networking Velocity Family Gigabit Ethernet Adapter driver.
 *
 * Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.
 * Copyright (c) 2004 Red Hat Inc.
 *
 * This software is licensed under the GNU General Public License;
 * see MODULE_LICENSE() below.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/if.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/inetdevice.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/crc-ccitt.h>
#include <linux/crc32.h>

#include "via-velocity.h"

enum velocity_bus_type {
	BUS_PCI,
	BUS_PLATFORM,
};

static int velocity_nics;

static void velocity_set_power_state(struct velocity_info *vptr, char state)
{
	void *addr = vptr->mac_regs;

	if (vptr->pdev)
		pci_set_power_state(vptr->pdev, state);
	else
		writeb(state, addr + 0x154);
}
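
/*
 * The helpers below drive the on-chip CAM (content addressable memory)
 * used for multicast and VLAN filtering.  Access is indirect: the
 * CAMCR_PS1/PS0 bits select which CAM page (mask, data or MAR) is
 * visible through the MARCAM window, CAMADDR selects the entry (with
 * CAMADDR_VCAMSL switching to the VLAN CAM), and CAMCR_CAMWR commits a
 * write.  Every helper restores CAMCR_PS_MAR on exit so normal
 * multicast hashing keeps working.  This summary is inferred from the
 * register usage below, not from the datasheet.
 */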

/**
 *	mac_get_cam_mask	-	Read a CAM mask
 *	@regs: register block for this velocity
 *	@mask: buffer to store mask
 *
 *	Fetch the mask bits of the selected CAM and store them into the
 *	provided mask buffer.
 */
static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;

	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	writeb(0, &regs->CAMADDR);

	/* read mask */
	for (i = 0; i < 8; i++)
		*mask++ = readb(&(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *	mac_set_cam_mask	-	Set a CAM mask
 *	@regs: register block for this velocity
 *	@mask: CAM mask to load
 *
 *	Store a new mask into a CAM
 */
static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;

	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	writeb(CAMADDR_CAMEN, &regs->CAMADDR);

	for (i = 0; i < 8; i++)
		writeb(*mask++, &(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;

	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);

	for (i = 0; i < 8; i++)
		writeb(*mask++, &(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *	mac_set_cam	-	set CAM data
 *	@regs: register block of this velocity
 *	@idx: CAM index
 *	@addr: 2 or 6 bytes of CAM data
 *
 *	Load an address or vlan tag into a CAM
 */
static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
{
	int i;

	/* Select CAM data */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	idx &= (64 - 1);

	writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);

	for (i = 0; i < 6; i++)
		writeb(*addr++, &(regs->MARCAM[i]));

	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

	udelay(10);

	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
			     const u8 *addr)
{
	/* Select CAM data */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	idx &= (64 - 1);

	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
	writew(*((u16 *) addr), &regs->MARCAM[0]);

	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

	udelay(10);

	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *	mac_wol_reset	-	reset WOL after exiting low power
 *	@regs: register block of this velocity
 *
 *	Called after we drop out of wake on lan mode in order to
 *	reset the Wake on lan features. This function doesn't restore
 *	the rest of the logic from the result of sleep/wakeup
 */
static void mac_wol_reset(struct mac_regs __iomem *regs)
{
	/* Turn off SWPTAG right after leaving power mode */
	BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
	/* clear sticky bits */
	BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

	BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
	BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
	/* disable force PME-enable */
	writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
	/* disable power-event config bit */
	writew(0xFFFF, &regs->WOLCRClr);
	/* clear power status */
	writew(0xFFFF, &regs->WOLSRClr);
}

static const struct ethtool_ops velocity_ethtool_ops;

/*
    Define module options
*/

MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");

#define VELOCITY_PARAM(N, D) \
	static int N[MAX_UNITS] = OPTION_DEFAULT;\
	module_param_array(N, int, NULL, 0); \
	MODULE_PARM_DESC(N, D);
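
/*
 * VELOCITY_PARAM(rx_thresh, "...") therefore expands to roughly:
 *
 *	static int rx_thresh[MAX_UNITS] = OPTION_DEFAULT;
 *	module_param_array(rx_thresh, int, NULL, 0);
 *	MODULE_PARM_DESC(rx_thresh, "...");
 *
 * i.e. each option is a per-adapter array indexed by probe order, with
 * OPTION_DEFAULT (-1, judging by the parsers below) meaning "use the
 * driver default".  The expansion shown here is illustrative;
 * OPTION_DEFAULT and MAX_UNITS come from via-velocity.h.
 */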

#define RX_DESC_MIN	64
#define RX_DESC_MAX	255
#define RX_DESC_DEF	64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN	16
#define TX_DESC_MAX	256
#define TX_DESC_DEF	64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");

#define RX_THRESH_MIN	0
#define RX_THRESH_MAX	3
#define RX_THRESH_DEF	0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: indicate the rxfifo threshold is 128 bytes.
   1: indicate the rxfifo threshold is 512 bytes.
   2: indicate the rxfifo threshold is 1024 bytes.
   3: indicate the rxfifo threshold is half of the rxfifo.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");

#define DMA_LENGTH_MIN	0
#define DMA_LENGTH_MAX	7
#define DMA_LENGTH_DEF	6
/* DMA_length[] is used for controlling the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF (store and forward, flush till empty)
   7: SF (store and forward, flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");

#define IP_ALIG_DEF	0
/* IP_byte_align[] is used for IP header DWORD byte aligned
   0: indicate the IP header won't be DWORD byte aligned. (Default)
   1: indicate the IP header will be DWORD byte aligned.
      In some environments, the IP header should be DWORD byte aligned,
      or the packet will be dropped when we receive it. (eg: IPVS)
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");

#define FLOW_CNTL_DEF	1
#define FLOW_CNTL_MIN	1
#define FLOW_CNTL_MAX	5
/* flow_control[] is used for setting the flow control ability of NIC.
   1: hardware default - AUTO (default). Use hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");

#define MED_LNK_DEF	0
#define MED_LNK_MIN	0
#define MED_LNK_MAX	5
/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
   0: indicate autonegotiation for both speed and duplex mode
   1: indicate 100Mbps half duplex mode
   2: indicate 100Mbps full duplex mode
   3: indicate 10Mbps half duplex mode
   4: indicate 10Mbps full duplex mode
   5: indicate 1000Mbps full duplex mode

   Note:
   if EEPROM have been set to the force mode, this option is ignored
   by the driver.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define WOL_OPT_DEF	0
#define WOL_OPT_MIN	0
#define WOL_OPT_MAX	7
/* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if received a magic packet. (Default)
   1: Wake up if link status is on/off.
   2: Wake up if received an arp packet.
   4: Wake up if received any unicast packet.
   Those values can be summed up to support more than one option.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");

static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
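
/*
 * Example (illustrative only): with two adapters installed, forcing
 * 100Mbps full duplex on the first while leaving the second on
 * autonegotiation, and copying received frames smaller than 1500
 * bytes into freshly allocated skbs:
 *
 *	modprobe via-velocity speed_duplex=2,0 rx_copybreak=1500
 *
 * The copybreak trade-off: copying small frames costs a memcpy, but
 * it lets the full sized, 64-byte aligned DMA buffer stay on the
 * ring, which is usually cheaper than mapping a replacement buffer.
 */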

/*
 *	Internal board variants. At the moment we have only one.
 */
static struct velocity_info_tbl chip_info_table[] = {
	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
	{ }
};

/*
 *	Describe the PCI device identifiers that we support in this
 *	device driver. Used for hotplug autoloading.
 */
static const struct pci_device_id velocity_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
	{ }
};

MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);

/*
 *	Describe the OF device identifiers that we support in this
 *	device driver. Used for devicetree nodes.
 */
static const struct of_device_id velocity_of_ids[] = {
	{ .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
	{ },
};
MODULE_DEVICE_TABLE(of, velocity_of_ids);

/**
 *	get_chip_name	-	identifier to name
 *	@chip_id: chip identifier
 *
 *	Given a chip identifier return a suitable description. Returns
 *	a pointer to a static string valid while the driver is loaded.
 */
static const char *get_chip_name(enum chip_type chip_id)
{
	int i;
	for (i = 0; chip_info_table[i].name != NULL; i++)
		if (chip_info_table[i].chip_id == chip_id)
			break;
	return chip_info_table[i].name;
}

/**
 *	velocity_set_int_opt	-	parser for integer options
 *	@opt: pointer to option value
 *	@val: value the user requested (or -1 for default)
 *	@min: lowest value allowed
 *	@max: highest value allowed
 *	@def: default value
 *	@name: property name
 *
 *	Set an integer property in the module options. This function does
 *	all the verification and checking as well as reporting so that
 *	we don't duplicate code for each option.
 */
static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
				 char *name)
{
	if (val == -1)
		*opt = def;
	else if (val < min || val > max) {
		pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
			  name, min, max);
		*opt = def;
	} else {
		pr_info("set value of parameter %s to %d\n", name, val);
		*opt = val;
	}
}

/**
 *	velocity_set_bool_opt	-	parser for boolean options
 *	@opt: pointer to option value
 *	@val: value the user requested (or -1 for default)
 *	@def: default value (yes/no)
 *	@flag: numeric flag for this option
 *	@name: property name
 *
 *	Set a boolean property in the module options. This function does
 *	all the verification and checking as well as reporting so that
 *	we don't duplicate code for each option.
 */
static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
				  char *name)
{
	(*opt) &= (~flag);
	if (val == -1)
		*opt |= (def ? flag : 0);
	else if (val < 0 || val > 1) {
		pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
			  name, 0, 1);
		*opt |= (def ? flag : 0);
	} else {
		pr_info("set parameter %s to %s\n",
			name, val ? "TRUE" : "FALSE");
		*opt |= (val ? flag : 0);
	}
}

/**
 *	velocity_get_options	-	set options on device
 *	@opts: option structure for the device
 *	@index: index of option to use in module options array
 *
 *	Turn the module and command options into a single structure
 *	for the current device
 */
static void velocity_get_options(struct velocity_opt *opts, int index)
{

	velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index],
			     RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF,
			     "rx_thresh");
	velocity_set_int_opt(&opts->DMA_length, DMA_length[index],
			     DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF,
			     "DMA_length");
	velocity_set_int_opt(&opts->numrx, RxDescriptors[index],
			     RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF,
			     "RxDescriptors");
	velocity_set_int_opt(&opts->numtx, TxDescriptors[index],
			     TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF,
			     "TxDescriptors");

	velocity_set_int_opt(&opts->flow_cntl, flow_control[index],
			     FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF,
			     "flow_control");
	velocity_set_bool_opt(&opts->flags, IP_byte_align[index],
			      IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN,
			      "IP_byte_align");
	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index],
			     MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF,
			     "Media link mode");
	velocity_set_int_opt(&opts->wol_opts, wol_opts[index],
			     WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF,
			     "Wake On Lan options");
	/* the receive ring length must be a multiple of 4 */
	opts->numrx = (opts->numrx & ~3);
}

/**
 *	velocity_init_cam_filter	-	initialise CAM
 *	@vptr: velocity to program
 *
 *	Initialize the content addressable memory used for filters. Load
 *	appropriately according to the presence of VLAN
 */
static void velocity_init_cam_filter(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	unsigned int vid, i = 0;

	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

	/* Disable all CAMs */
	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
	mac_set_cam_mask(regs, vptr->mCAMmask);

	/* Enable VCAMs */
	for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
		mac_set_vlan_cam(regs, i, (u8 *) &vid);
		vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
		if (++i >= VCAM_SIZE)
			break;
	}
	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
}
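
/*
 * The two ndo VLAN handlers below simply rebuild the whole VLAN CAM
 * from the active_vlans bitmap under vptr->lock on every add/kill.
 * With at most VCAM_SIZE filters this brute-force reload is cheap and
 * keeps the mask bookkeeping trivial.
 */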

static int velocity_vlan_rx_add_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct velocity_info *vptr = netdev_priv(dev);

	spin_lock_irq(&vptr->lock);
	set_bit(vid, vptr->active_vlans);
	velocity_init_cam_filter(vptr);
	spin_unlock_irq(&vptr->lock);
	return 0;
}

static int velocity_vlan_rx_kill_vid(struct net_device *dev,
				     __be16 proto, u16 vid)
{
	struct velocity_info *vptr = netdev_priv(dev);

	spin_lock_irq(&vptr->lock);
	clear_bit(vid, vptr->active_vlans);
	velocity_init_cam_filter(vptr);
	spin_unlock_irq(&vptr->lock);
	return 0;
}

static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
{
	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
}

/**
 *	velocity_rx_reset	-	handle a receive reset
 *	@vptr: velocity we are resetting
 *
 *	Reset the ownership and status for the receive ring side.
 *	Hand all the receive queue to the NIC.
 */
static void velocity_rx_reset(struct velocity_info *vptr)
{

	struct mac_regs __iomem *regs = vptr->mac_regs;
	int i;

	velocity_init_rx_ring_indexes(vptr);

	/*
	 *	Init state, all RD entries belong to the NIC
	 */
	for (i = 0; i < vptr->options.numrx; ++i)
		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;

	writew(vptr->options.numrx, &regs->RBRDU);
	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
	writew(0, &regs->RDIdx);
	writew(vptr->options.numrx - 1, &regs->RDCSize);
}
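
/*
 * Descriptor ownership protocol, as used throughout this driver: each
 * rx/tx descriptor carries an OWNED_BY_NIC bit in its length word.
 * The driver only touches descriptors with the bit clear; setting it
 * (followed by a write to the RBRDU/TDCSR doorbell registers) hands
 * the descriptor to the hardware, which clears it again on
 * completion.  Memory barriers (see the wmb()/rmb() calls below)
 * order the descriptor body against the ownership flip.
 */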

/**
 *	velocity_get_opt_media_mode	-	get media selection
 *	@vptr: velocity adapter
 *
 *	Get the media mode stored in EEPROM or module options and load
 *	mii_status accordingly. The requested link state information
 *	is also returned.
 */
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
	u32 status = 0;

	switch (vptr->options.spd_dpx) {
	case SPD_DPX_AUTO:
		status = VELOCITY_AUTONEG_ENABLE;
		break;
	case SPD_DPX_100_FULL:
		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
		break;
	case SPD_DPX_10_FULL:
		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
		break;
	case SPD_DPX_100_HALF:
		status = VELOCITY_SPEED_100;
		break;
	case SPD_DPX_10_HALF:
		status = VELOCITY_SPEED_10;
		break;
	case SPD_DPX_1000_FULL:
		status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
		break;
	}
	vptr->mii_status = status;
	return status;
}

/**
 *	safe_disable_mii_autopoll	-	autopoll off
 *	@regs: velocity registers
 *
 *	Turn off the autopoll and wait for it to disable on the chip
 */
static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
{
	u16 ww;

	/*  turn off MAUTO */
	writeb(0, &regs->MIICR);
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		udelay(1);
		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}
}

/**
 *	enable_mii_autopoll	-	turn on autopolling
 *	@regs: velocity registers
 *
 *	Enable the MII link status autopoll feature on the Velocity
 *	hardware. Wait for it to fire if it starts
 */
static void enable_mii_autopoll(struct mac_regs __iomem *regs)
{
	int ii;

	writeb(0, &(regs->MIICR));
	writeb(MIIADR_SWMPL, &regs->MIIADR);

	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
		udelay(1);
		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}

	writeb(MIICR_MAUTO, &regs->MIICR);

	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
		udelay(1);
		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}

}

/**
 *	velocity_mii_read	-	read MII data
 *	@regs: velocity registers
 *	@index: MII register index
 *	@data: buffer for received data
 *
 *	Perform a single read of an MII 16bit register. Returns zero
 *	on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
{
	u16 ww;

	/*
	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
	 */
	safe_disable_mii_autopoll(regs);

	writeb(index, &regs->MIIADR);

	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);

	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		if (!(readb(&regs->MIICR) & MIICR_RCMD))
			break;
	}

	*data = readw(&regs->MIIDATA);

	enable_mii_autopoll(regs);
	if (ww == W_MAX_TIMEOUT)
		return -ETIMEDOUT;
	return 0;
}

/**
 *	mii_check_media_mode	-	check media state
 *	@regs: velocity registers
 *
 *	Check the current MII status and determine the link status
 *	accordingly
 */
static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
{
	u32 status = 0;
	u16 ANAR;

	if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
		status |= VELOCITY_LINK_FAIL;

	if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
	else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
		status |= (VELOCITY_SPEED_1000);
	else {
		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
		if (ANAR & ADVERTISE_100FULL)
			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
		else if (ANAR & ADVERTISE_100HALF)
			status |= VELOCITY_SPEED_100;
		else if (ANAR & ADVERTISE_10FULL)
			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
		else
			status |= (VELOCITY_SPEED_10);
	}

	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
				status |= VELOCITY_AUTONEG_ENABLE;
		}
	}

	return status;
}

/**
 *	velocity_mii_write	-	write MII data
 *	@regs: velocity registers
 *	@mii_addr: MII register index
 *	@data: 16bit data for the MII register
 *
 *	Perform a single write to an MII 16bit register. Returns zero
 *	on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
{
	u16 ww;

	/*
	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
	 */
	safe_disable_mii_autopoll(regs);

	/* MII reg offset */
	writeb(mii_addr, &regs->MIIADR);
	/* set MII data */
	writew(data, &regs->MIIDATA);

	/* turn on MIICR_WCMD */
	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);

	/* W_MAX_TIMEOUT is the timeout period */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		udelay(5);
		if (!(readb(&regs->MIICR) & MIICR_WCMD))
			break;
	}
	enable_mii_autopoll(regs);

	if (ww == W_MAX_TIMEOUT)
		return -ETIMEDOUT;
	return 0;
}

/**
 *	set_mii_flow_control	-	flow control setup
 *	@vptr: velocity interface
 *
 *	Set up the flow control on this interface according to
 *	the supplied user/eeprom options.
 */
static void set_mii_flow_control(struct velocity_info *vptr)
{
	/* Enable or Disable PAUSE in ANAR */
	switch (vptr->options.flow_cntl) {
	case FLOW_CNTL_TX:
		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
		break;

	case FLOW_CNTL_RX:
		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
		break;

	case FLOW_CNTL_TX_RX:
		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
		break;

	case FLOW_CNTL_DISABLE:
		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
		break;
	default:
		break;
	}
}

/**
 *	mii_set_auto_on		-	autonegotiate on
 *	@vptr: velocity
 *
 *	Enable autonegotiation on this interface
 */
static void mii_set_auto_on(struct velocity_info *vptr)
{
	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
	else
		MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
}

static u32 check_connection_type(struct mac_regs __iomem *regs)
{
	u32 status = 0;
	u8 PHYSR0;
	u16 ANAR;
	PHYSR0 = readb(&regs->PHYSR0);

	/*
	   if (!(PHYSR0 & PHYSR0_LINKGD))
	   status |= VELOCITY_LINK_FAIL;
	 */

	if (PHYSR0 & PHYSR0_FDPX)
		status |= VELOCITY_DUPLEX_FULL;

	if (PHYSR0 & PHYSR0_SPDG)
		status |= VELOCITY_SPEED_1000;
	else if (PHYSR0 & PHYSR0_SPD10)
		status |= VELOCITY_SPEED_10;
	else
		status |= VELOCITY_SPEED_100;

	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
				status |= VELOCITY_AUTONEG_ENABLE;
		}
	}

	return status;
}

/**
 *	velocity_set_media_mode		-	set media mode
 *	@vptr: velocity adapter
 *	@mii_status: requested link state
 *
 *	Check the media link state and configure the flow control
 *	and the MAC of the adapter accordingly. Returns
 *	VELOCITY_LINK_CHANGE.
 */
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;

	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);

	/* Set mii link status */
	set_mii_flow_control(vptr);

	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
		MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);

	/*
	 *	If connection type is AUTO
	 */
	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
		netdev_info(vptr->netdev, "Velocity is in AUTO mode\n");
		/* clear force MAC mode bit */
		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
		/* set duplex mode of MAC according to duplex mode of MII */
		MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
		MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);

		/* enable AUTO-NEGO mode */
		mii_set_auto_on(vptr);
	} else {
		u16 CTRL1000;
		u16 ANAR;
		u8 CHIPGCR;

		/*
		 * 1. if it's 3119, disable frame bursting in halfduplex mode
		 *    and enable it in fullduplex mode
		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
		 * 3. only enable CD heart beat counter in 10HD mode
		 */

		/* set force MAC mode bit */
		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

		CHIPGCR = readb(&regs->CHIPGCR);

		if (mii_status & VELOCITY_SPEED_1000)
			CHIPGCR |= CHIPGCR_FCGMII;
		else
			CHIPGCR &= ~CHIPGCR_FCGMII;

		if (mii_status & VELOCITY_DUPLEX_FULL) {
			CHIPGCR |= CHIPGCR_FCFDX;
			writeb(CHIPGCR, &regs->CHIPGCR);
			netdev_info(vptr->netdev,
				    "set Velocity to forced full mode\n");
			if (vptr->rev_id < REV_ID_VT3216_A0)
				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
		} else {
			CHIPGCR &= ~CHIPGCR_FCFDX;
			netdev_info(vptr->netdev,
				    "set Velocity to forced half mode\n");
			writeb(CHIPGCR, &regs->CHIPGCR);
			if (vptr->rev_id < REV_ID_VT3216_A0)
				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
		}

		velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
		CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
		if ((mii_status & VELOCITY_SPEED_1000) &&
		    (mii_status & VELOCITY_DUPLEX_FULL)) {
			CTRL1000 |= ADVERTISE_1000FULL;
		}
		velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);

		/* only enable CD heart beat counter in 10HD mode */
		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
		else
			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

		/* set forced speed/duplex advertisement in ANAR */
		velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
		ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
		if (mii_status & VELOCITY_SPEED_100) {
			if (mii_status & VELOCITY_DUPLEX_FULL)
				ANAR |= ADVERTISE_100FULL;
			else
				ANAR |= ADVERTISE_100HALF;
		} else if (mii_status & VELOCITY_SPEED_10) {
			if (mii_status & VELOCITY_DUPLEX_FULL)
				ANAR |= ADVERTISE_10FULL;
			else
				ANAR |= ADVERTISE_10HALF;
		}
		velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);

		/* enable AUTO-NEGO mode */
		mii_set_auto_on(vptr);
	}

	return VELOCITY_LINK_CHANGE;
}

/**
 *	velocity_print_link_status	-	link status reporting
 *	@vptr: velocity to report on
 *
 *	Turn the link status of the velocity card into a kernel log
 *	description of the new link state, detailing speed and duplex
 *	status
 */
static void velocity_print_link_status(struct velocity_info *vptr)
{
	const char *link;
	const char *speed;
	const char *duplex;

	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
		netdev_notice(vptr->netdev, "failed to detect cable link\n");
		return;
	}

	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
		link = "auto-negotiation";

		if (vptr->mii_status & VELOCITY_SPEED_1000)
			speed = "1000";
		else if (vptr->mii_status & VELOCITY_SPEED_100)
			speed = "100";
		else
			speed = "10";

		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			duplex = "full";
		else
			duplex = "half";
	} else {
		link = "forced";

		switch (vptr->options.spd_dpx) {
		case SPD_DPX_1000_FULL:
			speed = "1000";
			duplex = "full";
			break;
		case SPD_DPX_100_HALF:
			speed = "100";
			duplex = "half";
			break;
		case SPD_DPX_100_FULL:
			speed = "100";
			duplex = "full";
			break;
		case SPD_DPX_10_HALF:
			speed = "10";
			duplex = "half";
			break;
		case SPD_DPX_10_FULL:
			speed = "10";
			duplex = "full";
			break;
		default:
			speed = "unknown";
			duplex = "unknown";
			break;
		}
	}
	netdev_notice(vptr->netdev, "Link %s speed %sM bps %s duplex\n",
		      link, speed, duplex);
}

/**
 *	enable_flow_control_ability	-	flow control
 *	@vptr: velocity to configure
 *
 *	Set up flow control according to the flow control options
 *	determined by the eeprom/module parameters.
 */
static void enable_flow_control_ability(struct velocity_info *vptr)
{

	struct mac_regs __iomem *regs = vptr->mac_regs;

	switch (vptr->options.flow_cntl) {

	case FLOW_CNTL_DEFAULT:
		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
			writel(CR0_FDXRFCEN, &regs->CR0Set);
		else
			writel(CR0_FDXRFCEN, &regs->CR0Clr);

		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
			writel(CR0_FDXTFCEN, &regs->CR0Set);
		else
			writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_TX:
		writel(CR0_FDXTFCEN, &regs->CR0Set);
		writel(CR0_FDXRFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_RX:
		writel(CR0_FDXRFCEN, &regs->CR0Set);
		writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_TX_RX:
		writel(CR0_FDXTFCEN, &regs->CR0Set);
		writel(CR0_FDXRFCEN, &regs->CR0Set);
		break;

	case FLOW_CNTL_DISABLE:
		writel(CR0_FDXRFCEN, &regs->CR0Clr);
		writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	default:
		break;
	}

}

/**
 *	velocity_soft_reset	-	soft reset
 *	@vptr: velocity to reset
 *
 *	Kick off a soft reset of the velocity adapter and then poll
 *	till the reset completes. If the reset never completes, force
 *	it through and allow the hardware time to settle.
 */
static int velocity_soft_reset(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int i = 0;

	writel(CR0_SFRST, &regs->CR0Set);

	for (i = 0; i < W_MAX_TIMEOUT; i++) {
		udelay(5);
		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
			break;
	}

	if (i == W_MAX_TIMEOUT) {
		writel(CR0_FORSRST, &regs->CR0Set);
		/* delay 2ms */
		mdelay(2);
	}
	return 0;
}
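
/*
 * Note on the two reset bits used above: CR0_SFRST requests a soft
 * reset that the chip acknowledges by clearing the bit, while
 * CR0_FORSRST (used only after the W_MAX_TIMEOUT poll expires)
 * appears to force the reset unconditionally, hence the fixed 2ms
 * settle time instead of a completion poll.
 */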

/**
 *	velocity_set_multi	-	filter list change callback
 *	@dev: network device
 *
 *	Called by the network layer when the filter lists need to change
 *	for a velocity adapter. Reload the CAMs with the new filter
 *	settings.
 */
static void velocity_set_multi(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	struct mac_regs __iomem *regs = vptr->mac_regs;
	u8 rx_mode;
	int i;
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		writel(0xffffffff, &regs->MARCAM[0]);
		writel(0xffffffff, &regs->MARCAM[4]);
		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
	} else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		writel(0xffffffff, &regs->MARCAM[0]);
		writel(0xffffffff, &regs->MARCAM[4]);
		rx_mode = (RCR_AM | RCR_AB);
	} else {
		int offset = MCAM_SIZE - vptr->multicast_limit;
		mac_get_cam_mask(regs, vptr->mCAMmask);

		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			mac_set_cam(regs, i + offset, ha->addr);
			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
			i++;
		}

		mac_set_cam_mask(regs, vptr->mCAMmask);
		rx_mode = RCR_AM | RCR_AB | RCR_AP;
	}
	if (dev->mtu > 1500)
		rx_mode |= RCR_AL;

	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);

}

/**
 *	mii_init	-	set up MII
 *	@vptr: velocity adapter
 *	@mii_status: link status
 *
 *	Set up the PHY for the current link state.
 */
static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
	u16 BMCR;

	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
	case PHYID_ICPLUS_IP101A:
		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
				MII_ADVERTISE, vptr->mac_regs);
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
					vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
					 vptr->mac_regs);
		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
		break;
	case PHYID_CICADA_CS8201:
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
		/*
		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
		 *	off in NWay-forced half mode for NWay-forced v.s.
		 *	legacy-forced issue.
		 */
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
		/*
		 *	Turn on Link/Activity LED enable bit for CIS8201
		 */
		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
		break;
	case PHYID_VT3216_32BIT:
	case PHYID_VT3216_64BIT:
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
		/*
		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
		 *	off in NWay-forced half mode for NWay-forced v.s.
		 *	legacy-forced issue
		 */
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
		break;

	case PHYID_MARVELL_1000:
	case PHYID_MARVELL_1000S:
		/*
		 *	Assert CRS on Transmit
		 */
		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
		break;
	default:
		;
	}
	velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
	if (BMCR & BMCR_ISOLATE) {
		BMCR &= ~BMCR_ISOLATE;
		velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
	}
}

/**
 *	setup_queue_timers	-	Setup interrupt timers
 *	@vptr: velocity adapter
 *
 *	Setup the TX and RX queue interrupt suppression timers.
 *	Only supported on revision VT3216_A0 and later, and only
 *	useful at 100/1000Mbps link speeds.
 */
static void setup_queue_timers(struct velocity_info *vptr)
{
	/* Only for revision VT3216_A0 and later */
	if (vptr->rev_id >= REV_ID_VT3216_A0) {
		u8 txqueue_timer = 0;
		u8 rxqueue_timer = 0;

		if (vptr->mii_status & (VELOCITY_SPEED_1000 |
					VELOCITY_SPEED_100)) {
			txqueue_timer = vptr->options.txqueue_timer;
			rxqueue_timer = vptr->options.rxqueue_timer;
		}

		writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
		writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
	}
}

/**
 *	setup_adaptive_interrupts  -  Setup interrupt suppression
 *	@vptr: velocity adapter
 *
 *	The velocity is able to suppress interrupts during high interrupt
 *	load. This function turns on that feature.
 */
static void setup_adaptive_interrupts(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	u16 tx_intsup = vptr->options.tx_intsup;
	u16 rx_intsup = vptr->options.rx_intsup;

	/* Setup default interrupt mask (will be changed below) */
	vptr->int_mask = INT_MASK_DEF;

	/* Set Tx Interrupt Suppression Threshold */
	writeb(CAMCR_PS0, &regs->CAMCR);
	if (tx_intsup != 0) {
		vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
				    ISR_PTX2I | ISR_PTX3I);
		writew(tx_intsup, &regs->ISRCTL);
	} else
		writew(ISRCTL_TSUPDIS, &regs->ISRCTL);

	/* Set Rx Interrupt Suppression Threshold */
	writeb(CAMCR_PS1, &regs->CAMCR);
	if (rx_intsup != 0) {
		vptr->int_mask &= ~ISR_PRXI;
		writew(rx_intsup, &regs->ISRCTL);
	} else
		writew(ISRCTL_RSUPDIS, &regs->ISRCTL);

	/* Select page to interrupt hold timer */
	writeb(0, &regs->CAMCR);
}

/**
 *	velocity_init_registers	-	initialise MAC registers
 *	@vptr: velocity to init
 *	@type: type of initialisation (hot or cold)
 *
 *	Initialise the MAC on a reset or on first set up on the
 *	hardware.
 */
static void velocity_init_registers(struct velocity_info *vptr,
				    enum velocity_init_type type)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	struct net_device *netdev = vptr->netdev;
	int i, mii_status;

	mac_wol_reset(regs);

	switch (type) {
	case VELOCITY_INIT_RESET:
	case VELOCITY_INIT_WOL:

		netif_stop_queue(netdev);

		/*
		 *	Reset RX to prevent RX pointer not on the 4X location
		 */
		velocity_rx_reset(vptr);
		mac_rx_queue_run(regs);
		mac_rx_queue_wake(regs);

		mii_status = velocity_get_opt_media_mode(vptr);
		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
			velocity_print_link_status(vptr);
			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
				netif_wake_queue(netdev);
		}

		enable_flow_control_ability(vptr);

		mac_clear_isr(regs);
		writel(CR0_STOP, &regs->CR0Clr);
		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
		       &regs->CR0Set);

		break;

	case VELOCITY_INIT_COLD:
	default:
		/*
		 *	Do reset
		 */
		velocity_soft_reset(vptr);
		mdelay(5);

		if (!vptr->no_eeprom) {
			mac_eeprom_reload(regs);
			for (i = 0; i < 6; i++)
				writeb(netdev->dev_addr[i], regs->PAR + i);
		}

		/*
		 *	clear Pre_ACPI bit.
		 */
		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
		mac_set_dma_length(regs, vptr->options.DMA_length);

		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);

		/*
		 *	Back off algorithm use original IEEE standard
		 */
		BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);

		/*
		 *	Init CAM filter
		 */
		velocity_init_cam_filter(vptr);

		/*
		 *	Set packet filter: Receive directed and broadcast address
		 */
		velocity_set_multi(netdev);

		/*
		 *	Enable MII auto-polling
		 */
		enable_mii_autopoll(regs);

		setup_adaptive_interrupts(vptr);

		writel(vptr->rx.pool_dma, &regs->RDBaseLo);
		writew(vptr->options.numrx - 1, &regs->RDCSize);
		mac_rx_queue_run(regs);
		mac_rx_queue_wake(regs);

		writew(vptr->options.numtx - 1, &regs->TDCSize);

		for (i = 0; i < vptr->tx.numq; i++) {
			writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
			mac_tx_queue_run(regs, i);
		}

		init_flow_control_register(vptr);

		writel(CR0_STOP, &regs->CR0Clr);
		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);

		mii_status = velocity_get_opt_media_mode(vptr);
		netif_stop_queue(netdev);

		mii_init(vptr, mii_status);

		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
			velocity_print_link_status(vptr);
			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
				netif_wake_queue(netdev);
		}

		enable_flow_control_ability(vptr);
		mac_hw_mibs_init(regs);
		mac_write_int_mask(vptr->int_mask, regs);
		mac_clear_isr(regs);

	}
}

static void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int avail, dirty, unusable;

	/*
	 * RD number must be equal to 4X per hardware spec
	 * (programming guide rev 1.20, p.13)
	 */
	if (vptr->rx.filled < 4)
		return;

	wmb();

	unusable = vptr->rx.filled & 0x0003;
	dirty = vptr->rx.dirty - unusable;
	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
	}

	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
	vptr->rx.filled = unusable;
}
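
/*
 * Worked example of the batching above: with rx.filled == 11, the low
 * two bits give unusable == 3, so only 8 descriptors are flipped to
 * OWNED_BY_NIC and reported via RBRDU; the remaining 3 stay counted
 * in rx.filled until a later refill rounds them up to another
 * multiple of 4.
 */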

/**
 *	velocity_init_dma_rings	-	set up DMA rings
 *	@vptr: Velocity to set up
 *
 *	Allocate PCI mapped DMA rings for the receive and transmit layer
 *	to use.
 */
static int velocity_init_dma_rings(struct velocity_info *vptr)
{
	struct velocity_opt *opt = &vptr->options;
	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
	dma_addr_t pool_dma;
	void *pool;
	unsigned int i;

	/*
	 * Allocate all RD/TD rings a single pool.
	 *
	 * dma_alloc_coherent() fulfills the requirement for 64 bytes
	 * alignment
	 */
	pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
				  rx_ring_size, &pool_dma, GFP_ATOMIC);
	if (!pool) {
		dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
			vptr->netdev->name);
		return -ENOMEM;
	}

	vptr->rx.ring = pool;
	vptr->rx.pool_dma = pool_dma;

	pool += rx_ring_size;
	pool_dma += rx_ring_size;

	for (i = 0; i < vptr->tx.numq; i++) {
		vptr->tx.rings[i] = pool;
		vptr->tx.pool_dma[i] = pool_dma;
		pool += tx_ring_size;
		pool_dma += tx_ring_size;
	}

	return 0;
}
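
/*
 * Resulting pool layout, for the default numrx = 64, numtx = 64 and a
 * single tx queue (actual sizes depend on the descriptor structs in
 * via-velocity.h):
 *
 *	[ rx ring: 64 * sizeof(struct rx_desc) ][ tx ring 0: 64 * sizeof(struct tx_desc) ]
 *
 * rx.pool_dma points at the start of the pool, which is why
 * velocity_free_dma_rings() below can release everything with a
 * single dma_free_coherent() call on the rx ring address.
 */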

static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
{
	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
}

/**
 *	velocity_alloc_rx_buf	-	allocate aligned receive buffer
 *	@vptr: velocity
 *	@idx: ring index
 *
 *	Allocate a new full sized buffer for the reception of a frame and
 *	map it into PCI space for the hardware to use. The hardware
 *	requires *64* byte alignment of the buffer which makes life
 *	painful. We are not interested in the failed frames as they
 *	simply reuse old buffers.
 */
static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
{
	struct rx_desc *rd = &(vptr->rx.ring[idx]);
	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);

	rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
	if (rd_info->skb == NULL)
		return -ENOMEM;

	/*
	 *	Do the gymnastics to get the buffer head for data at
	 *	64byte alignment.
	 */
	skb_reserve(rd_info->skb,
		    64 - ((unsigned long) rd_info->skb->data & 63));
	rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
					  vptr->rx.buf_sz, DMA_FROM_DEVICE);

	/*
	 *	Fill in the descriptor to match
	 */
	*((u32 *) &(rd->rdesc0)) = 0;
	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
	rd->pa_high = 0;
	return 0;
}
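
/*
 * Alignment example: the skb is allocated with buf_sz + 64 bytes so
 * there is always room to round up.  If skb->data lands at, say,
 * ...0x1238, then (0x1238 & 63) == 0x38 and skb_reserve() advances
 * the data pointer by 64 - 0x38 = 8 bytes to the next 64-byte
 * boundary at ...0x1240, which is what the hardware requires for
 * receive DMA.
 */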

static int velocity_rx_refill(struct velocity_info *vptr)
{
	int dirty = vptr->rx.dirty, done = 0;

	do {
		struct rx_desc *rd = vptr->rx.ring + dirty;

		/* Fine for an all zero Rx desc at init time as well */
		if (rd->rdesc0.len & OWNED_BY_NIC)
			break;

		if (!vptr->rx.info[dirty].skb) {
			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
				break;
		}
		done++;
		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
	} while (dirty != vptr->rx.curr);

	if (done) {
		vptr->rx.dirty = dirty;
		vptr->rx.filled += done;
	}

	return done;
}

/**
 *	velocity_free_rd_ring	-	free receive ring
 *	@vptr: velocity to clean up
 *
 *	Free the receive buffers for each ring slot and any
 *	attached socket buffers that need to go away.
 */
static void velocity_free_rd_ring(struct velocity_info *vptr)
{
	int i;

	if (vptr->rx.info == NULL)
		return;

	for (i = 0; i < vptr->options.numrx; i++) {
		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
		struct rx_desc *rd = vptr->rx.ring + i;

		memset(rd, 0, sizeof(*rd));

		if (!rd_info->skb)
			continue;
		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
				 DMA_FROM_DEVICE);
		rd_info->skb_dma = 0;

		dev_kfree_skb(rd_info->skb);
		rd_info->skb = NULL;
	}

	kfree(vptr->rx.info);
	vptr->rx.info = NULL;
}

/**
 *	velocity_init_rd_ring	-	set up receive ring
 *	@vptr: velocity to configure
 *
 *	Allocate and set up the receive buffers for each ring slot and
 *	assign them to the network adapter.
 */
static int velocity_init_rd_ring(struct velocity_info *vptr)
{
	int ret = -ENOMEM;

	vptr->rx.info = kcalloc(vptr->options.numrx,
				sizeof(struct velocity_rd_info), GFP_KERNEL);
	if (!vptr->rx.info)
		goto out;

	velocity_init_rx_ring_indexes(vptr);

	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
		netdev_err(vptr->netdev, "failed to allocate RX buffer\n");
		velocity_free_rd_ring(vptr);
		goto out;
	}

	ret = 0;
out:
	return ret;
}

/**
 *	velocity_init_td_ring	-	set up transmit ring
 *	@vptr:	velocity
 *
 *	Set up the transmit ring and chain the ring pointers together.
 *	Returns zero on success or a negative posix errno code for
 *	failure.
 */
static int velocity_init_td_ring(struct velocity_info *vptr)
{
	int j;

	/* Init the TD ring entries */
	for (j = 0; j < vptr->tx.numq; j++) {

		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
					    sizeof(struct velocity_td_info),
					    GFP_KERNEL);
		if (!vptr->tx.infos[j]) {
			while (--j >= 0)
				kfree(vptr->tx.infos[j]);
			return -ENOMEM;
		}

		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
	}
	return 0;
}

/**
 *	velocity_free_dma_rings	-	free PCI ring pointers
 *	@vptr: Velocity to free from
 *
 *	Clean up the PCI ring buffers allocated to this velocity.
 */
static void velocity_free_dma_rings(struct velocity_info *vptr)
{
	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;

	dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
}

static int velocity_init_rings(struct velocity_info *vptr, int mtu)
{
	int ret;

	velocity_set_rxbufsize(vptr, mtu);

	ret = velocity_init_dma_rings(vptr);
	if (ret < 0)
		goto out;

	ret = velocity_init_rd_ring(vptr);
	if (ret < 0)
		goto err_free_dma_rings_0;

	ret = velocity_init_td_ring(vptr);
	if (ret < 0)
		goto err_free_rd_ring_1;
out:
	return ret;

err_free_rd_ring_1:
	velocity_free_rd_ring(vptr);
err_free_dma_rings_0:
	velocity_free_dma_rings(vptr);
	goto out;
}

/**
 *	velocity_free_tx_buf	-	free transmit buffer
 *	@vptr: velocity
 *	@tdinfo: buffer
 *	@td: transmit descriptor to free
 *
 *	Release a transmit buffer, unmapping any fragments it may
 *	still reference.
 */
static void velocity_free_tx_buf(struct velocity_info *vptr,
				 struct velocity_td_info *tdinfo, struct tx_desc *td)
{
	struct sk_buff *skb = tdinfo->skb;
	int i;

	/*
	 *	Don't unmap the pre-allocated tx_bufs
	 */
	for (i = 0; i < tdinfo->nskb_dma; i++) {
		size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);

		/* For scatter-gather */
		if (skb_shinfo(skb)->nr_frags > 0)
			pktlen = max_t(size_t, pktlen,
				       td->td_buf[i].size & ~TD_QUEUE);

		dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
				 le16_to_cpu(pktlen), DMA_TO_DEVICE);
	}
	dev_consume_skb_irq(skb);
	tdinfo->skb = NULL;
}

/*
 *	FIXME: could we merge this with velocity_free_tx_buf ?
 */
static void velocity_free_td_ring_entry(struct velocity_info *vptr,
					int q, int n)
{
	struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
	int i;

	if (td_info == NULL)
		return;

	if (td_info->skb) {
		for (i = 0; i < td_info->nskb_dma; i++) {
			if (td_info->skb_dma[i]) {
				dma_unmap_single(vptr->dev, td_info->skb_dma[i],
						 td_info->skb->len, DMA_TO_DEVICE);
				td_info->skb_dma[i] = 0;
			}
		}
		dev_kfree_skb(td_info->skb);
		td_info->skb = NULL;
	}
}

/**
 *	velocity_free_td_ring	-	free td ring
 *	@vptr: velocity
 *
 *	Free up the transmit ring for this particular velocity adapter.
 *	We free the ring contents but not the ring itself.
 */
static void velocity_free_td_ring(struct velocity_info *vptr)
{
	int i, j;

	for (j = 0; j < vptr->tx.numq; j++) {
		if (vptr->tx.infos[j] == NULL)
			continue;
		for (i = 0; i < vptr->options.numtx; i++)
			velocity_free_td_ring_entry(vptr, j, i);

		kfree(vptr->tx.infos[j]);
		vptr->tx.infos[j] = NULL;
	}
}

static void velocity_free_rings(struct velocity_info *vptr)
{
	velocity_free_td_ring(vptr);
	velocity_free_rd_ring(vptr);
	velocity_free_dma_rings(vptr);
}

/**
 *	velocity_error	-	handle error from controller
 *	@vptr: velocity
 *	@status: card status
 *
 *	Process an error report from the hardware and attempt to recover
 *	the card itself. At the moment we cannot recover from some
 *	theoretically impossible errors but this could be fixed using
 *	the current card debugging technology.. fingers crossed..
 */
static void velocity_error(struct velocity_info *vptr, int status)
{

	if (status & ISR_TXSTLI) {
		struct mac_regs __iomem *regs = vptr->mac_regs;

		netdev_err(vptr->netdev, "TD structure error TDindex=%hx\n",
			   readw(&regs->TDIdx[0]));
		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
		writew(TRDCSR_RUN, &regs->TDCSRClr);
		netif_stop_queue(vptr->netdev);

		/* FIXME: port over the pci_device_failed code and use it
		   here */
	}

	if (status & ISR_SRCI) {
		struct mac_regs __iomem *regs = vptr->mac_regs;
		int linked;

		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
			vptr->mii_status = check_connection_type(regs);

			/*
			 *	If it is a 3119, disable frame bursting in
			 *	halfduplex mode and enable it in fullduplex
			 *	mode
			 */
			if (vptr->rev_id < REV_ID_VT3216_A0) {
				if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
				else
					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
			}
			/*
			 *	Only enable CD heart beat counter in 10HD mode
			 */
			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
			else
				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

			setup_queue_timers(vptr);
		}

		/*
		 *	Get link status from PHYSR0
		 */
		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;

		if (linked) {
			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
			netif_carrier_on(vptr->netdev);
		} else {
			vptr->mii_status |= VELOCITY_LINK_FAIL;
			netif_carrier_off(vptr->netdev);
		}

		velocity_print_link_status(vptr);
		enable_flow_control_ability(vptr);

		/*
		 *	Re-enable auto-polling because SRCI will disable
		 *	auto-polling
		 */
		enable_mii_autopoll(regs);

		if (vptr->mii_status & VELOCITY_LINK_FAIL)
			netif_stop_queue(vptr->netdev);
		else
			netif_wake_queue(vptr->netdev);

	}
	if (status & ISR_MIBFI)
		velocity_update_hw_mibs(vptr);
	if (status & ISR_LSTEI)
		mac_rx_queue_wake(vptr->mac_regs);
}

/**
 *	velocity_tx_srv		-	transmit interrupt service
 *	@vptr: Velocity
 *
 *	Scan the queues looking for transmitted packets that
 *	we can complete and clean up. Update any statistics as
 *	necessary.
 */
static int velocity_tx_srv(struct velocity_info *vptr)
{
	struct tx_desc *td;
	int qnum;
	int full = 0;
	int idx;
	int works = 0;
	struct velocity_td_info *tdinfo;
	struct net_device_stats *stats = &vptr->netdev->stats;

	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
		     idx = (idx + 1) % vptr->options.numtx) {

			/*
			 *	Get Tx Descriptor
			 */
			td = &(vptr->tx.rings[qnum][idx]);
			tdinfo = &(vptr->tx.infos[qnum][idx]);

			if (td->tdesc0.len & OWNED_BY_NIC)
				break;

			if ((works++ > 15))
				break;

			if (td->tdesc0.TSR & TSR0_TERR) {
				stats->tx_errors++;
				stats->tx_dropped++;
				if (td->tdesc0.TSR & TSR0_CDH)
					stats->tx_heartbeat_errors++;
				if (td->tdesc0.TSR & TSR0_CRS)
					stats->tx_carrier_errors++;
				if (td->tdesc0.TSR & TSR0_ABT)
					stats->tx_aborted_errors++;
				if (td->tdesc0.TSR & TSR0_OWC)
					stats->tx_window_errors++;
			} else {
				stats->tx_packets++;
				stats->tx_bytes += tdinfo->skb->len;
			}
			velocity_free_tx_buf(vptr, tdinfo, td);
			vptr->tx.used[qnum]--;
		}
		vptr->tx.tail[qnum] = idx;

		if (AVAIL_TD(vptr, qnum) < 1)
			full = 1;
	}
	/*
	 *	Look to see if we should kick the transmit network
	 *	layer for more work.
	 */
	if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
	    (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
		netif_wake_queue(vptr->netdev);
	}
	return works;
}
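
/*
 * The "works++ > 15" check above caps each service pass at roughly 16
 * completions per queue, so a busy transmitter cannot monopolise the
 * (vptr->lock-holding) poll loop; anything left over is picked up on
 * the next NAPI poll.
 */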

/**
 *	velocity_rx_csum	-	checksum process
 *	@rd: receive packet descriptor
 *	@skb: network layer packet buffer
 *
 *	Process the status bits for the received packet and determine
 *	if the checksum was computed and verified by the hardware
 */
static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	if (rd->rdesc1.CSM & CSM_IPKT) {
		if (rd->rdesc1.CSM & CSM_IPOK) {
			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
			    (rd->rdesc1.CSM & CSM_UDPKT)) {
				if (!(rd->rdesc1.CSM & CSM_TUPOK))
					return;
			}
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
}

/**
 *	velocity_rx_copy	-	in place Rx copy for small packets
 *	@rx_skb: network layer packet buffer candidate
 *	@pkt_size: received data size
 *	@vptr: velocity adapter
 *
 *	Replace the current skb that is scheduled for Rx processing by a
 *	shorter, immediately allocated skb, if the received packet is small
 *	enough. This function returns a negative value if the received
 *	packet is too big or if memory is exhausted for a new skb.
 */
static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
			    struct velocity_info *vptr)
{
	int ret = -1;
	if (pkt_size < rx_copybreak) {
		struct sk_buff *new_skb;

		new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
		if (new_skb) {
			new_skb->ip_summed = rx_skb[0]->ip_summed;
			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
			*rx_skb = new_skb;
			ret = 0;
		}

	}
	return ret;
}

/**
 *	velocity_iph_realign	-	IP header alignment
 *	@vptr: velocity we are handling
 *	@skb: network layer packet buffer
 *	@pkt_size: received data size
 *
 *	Align IP header on a 2 bytes boundary. This behavior can be
 *	configured by the user.
 */
static inline void velocity_iph_realign(struct velocity_info *vptr,
					struct sk_buff *skb, int pkt_size)
{
	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
		memmove(skb->data + 2, skb->data, pkt_size);
		skb_reserve(skb, 2);
	}
}

/**
 *	velocity_receive_frame	-	received packet processor
 *	@vptr: velocity we are handling
 *	@idx: ring index
 *
 *	A packet has arrived. We process the packet and if appropriate
 *	pass the frame up the network stack
 */
static int velocity_receive_frame(struct velocity_info *vptr, int idx)
{
	struct net_device_stats *stats = &vptr->netdev->stats;
	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
	struct rx_desc *rd = &(vptr->rx.ring[idx]);
	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
	struct sk_buff *skb;

	if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
		if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
			netdev_err(vptr->netdev, "received frame spans multiple RDs\n");
		stats->rx_length_errors++;
		return -EINVAL;
	}

	if (rd->rdesc0.RSR & RSR_MAR)
		stats->multicast++;

	skb = rd_info->skb;

	dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
				vptr->rx.buf_sz, DMA_FROM_DEVICE);

	velocity_rx_csum(rd, skb);

	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
		velocity_iph_realign(vptr, skb, pkt_len);
		rd_info->skb = NULL;
		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
				 DMA_FROM_DEVICE);
	} else {
		dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
					   vptr->rx.buf_sz, DMA_FROM_DEVICE);
	}

	skb_put(skb, pkt_len - 4);
	skb->protocol = eth_type_trans(skb, vptr->netdev);

	if (rd->rdesc0.RSR & RSR_DETAG) {
		u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
	netif_receive_skb(skb);

	stats->rx_bytes += pkt_len;
	stats->rx_packets++;

	return 0;
}

/**
 *	velocity_rx_srv		-	service RX interrupt
 *	@vptr: velocity
 *	@budget_left: remaining NAPI budget
 *
 *	Walk the receive ring of the velocity adapter and remove
 *	any received packets from the receive queue. Hand the ring
 *	slots back to the adapter for reuse.
 */
static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
{
	struct net_device_stats *stats = &vptr->netdev->stats;
	int rd_curr = vptr->rx.curr;
	int works = 0;

	while (works < budget_left) {
		struct rx_desc *rd = vptr->rx.ring + rd_curr;

		if (!vptr->rx.info[rd_curr].skb)
			break;

		if (rd->rdesc0.len & OWNED_BY_NIC)
			break;

		rmb();

		/*
		 *	Don't drop CE or RL error frame although RXOK is off
		 */
		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
			if (velocity_receive_frame(vptr, rd_curr) < 0)
				stats->rx_dropped++;
		} else {
			if (rd->rdesc0.RSR & RSR_CRC)
				stats->rx_crc_errors++;
			if (rd->rdesc0.RSR & RSR_FAE)
				stats->rx_frame_errors++;

			stats->rx_dropped++;
		}

		rd->size |= RX_INTEN;

		rd_curr++;
		if (rd_curr >= vptr->options.numrx)
			rd_curr = 0;
		works++;
	}

	vptr->rx.curr = rd_curr;

	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
		velocity_give_many_rx_descs(vptr);

	VAR_USED(stats);
	return works;
}

static int velocity_poll(struct napi_struct *napi, int budget)
{
	struct velocity_info *vptr = container_of(napi,
			struct velocity_info, napi);
	unsigned int rx_done;
	unsigned long flags;

	/*
	 * Do rx and tx twice for performance (taken from the VIA
	 * out-of-tree driver).
	 */
	rx_done = velocity_rx_srv(vptr, budget);
	spin_lock_irqsave(&vptr->lock, flags);
	velocity_tx_srv(vptr);
	/* If budget not fully consumed, exit the polling mode */
	if (rx_done < budget) {
		napi_complete_done(napi, rx_done);
		mac_enable_int(vptr->mac_regs);
	}
	spin_unlock_irqrestore(&vptr->lock, flags);

	return rx_done;
}
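
/*
 * NAPI flow as implemented above and in velocity_intr() below: the
 * interrupt handler acks the ISR, masks the device interrupt and
 * schedules the poll; velocity_poll() then services rx within the
 * budget and reaps tx completions, and only re-enables the device
 * interrupt via mac_enable_int() once a poll finishes under budget.
 */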

/**
 *	velocity_intr		-	interrupt callback
 *	@irq: interrupt number
 *	@dev_instance: interrupting device
 *
 *	Called whenever an interrupt is generated by the velocity
 *	adapter IRQ line. We may not be the source of the interrupt
 *	and need to identify initially if we are, and if not exit as
 *	efficiently as possible.
 */
static irqreturn_t velocity_intr(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct velocity_info *vptr = netdev_priv(dev);
	u32 isr_status;

	spin_lock(&vptr->lock);
	isr_status = mac_read_isr(vptr->mac_regs);

	/* Not us ? */
	if (isr_status == 0) {
		spin_unlock(&vptr->lock);
		return IRQ_NONE;
	}

	/* Ack the interrupt */
	mac_write_isr(vptr->mac_regs, isr_status);

	if (likely(napi_schedule_prep(&vptr->napi))) {
		mac_disable_int(vptr->mac_regs);
		__napi_schedule(&vptr->napi);
	}

	if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
		velocity_error(vptr, isr_status);

	spin_unlock(&vptr->lock);

	return IRQ_HANDLED;
}

/**
 *	velocity_open		-	interface activation callback
 *	@dev: network layer device to open
 *
 *	Called when the network layer brings the interface up. Returns
 *	a negative posix error code on failure, or zero on success.
 *
 *	All the ring allocation and set up is done on open for this
 *	reason we reload the hardware at this point and take settings
 *	from the EEPROM/device tree as required.
 */
static int velocity_open(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int ret;

	ret = velocity_init_rings(vptr, dev->mtu);
	if (ret < 0)
		goto out;

	/* Ensure chip is running */
	velocity_set_power_state(vptr, PCI_D0);

	velocity_init_registers(vptr, VELOCITY_INIT_COLD);

	ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
			  dev->name, dev);
	if (ret < 0) {
		/* Power down the chip */
		velocity_set_power_state(vptr, PCI_D3hot);
		velocity_free_rings(vptr);
		goto out;
	}

	velocity_give_many_rx_descs(vptr);

	mac_enable_int(vptr->mac_regs);
	netif_start_queue(dev);
	napi_enable(&vptr->napi);
	vptr->flags |= VELOCITY_FLAGS_OPENED;
out:
	return ret;
}

/**
 *	velocity_shutdown	-	shut down the chip
 *	@vptr: velocity to deactivate
 *
 *	Shuts down the internal operations of the velocity and
 *	disables interrupts, autopolling, transmit and receive
 */
static void velocity_shutdown(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	mac_disable_int(regs);
	writel(CR0_STOP, &regs->CR0Set);
	writew(0xFFFF, &regs->TDCSRClr);
	writeb(0xFF, &regs->RDCSRClr);
	safe_disable_mii_autopoll(regs);
	mac_clear_isr(regs);
}

/**
 *	velocity_change_mtu	-	MTU change callback
 *	@dev: network device
 *	@new_mtu: desired MTU
 *
 *	Handle requests from the networking layer for MTU change on
 *	this interface. It gets called on a change by the network layer.
 *	Return zero for success or negative posix error code.
 */
static int velocity_change_mtu(struct net_device *dev, int new_mtu)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int ret = 0;

	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		goto out_0;
	}

	if (dev->mtu != new_mtu) {
		struct velocity_info *tmp_vptr;
		unsigned long flags;
		struct rx_info rx;
		struct tx_info tx;

		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
		if (!tmp_vptr) {
			ret = -ENOMEM;
			goto out_0;
		}

		tmp_vptr->netdev = dev;
		tmp_vptr->pdev = vptr->pdev;
		tmp_vptr->dev = vptr->dev;
		tmp_vptr->options = vptr->options;
		tmp_vptr->tx.numq = vptr->tx.numq;

		ret = velocity_init_rings(tmp_vptr, new_mtu);
		if (ret < 0)
			goto out_free_tmp_vptr_1;

		napi_disable(&vptr->napi);

		spin_lock_irqsave(&vptr->lock, flags);

		netif_stop_queue(dev);
		velocity_shutdown(vptr);

		rx = vptr->rx;
		tx = vptr->tx;

		vptr->rx = tmp_vptr->rx;
		vptr->tx = tmp_vptr->tx;

		tmp_vptr->rx = rx;
		tmp_vptr->tx = tx;

		dev->mtu = new_mtu;

		velocity_init_registers(vptr, VELOCITY_INIT_COLD);

		velocity_give_many_rx_descs(vptr);

		napi_enable(&vptr->napi);

		mac_enable_int(vptr->mac_regs);
		netif_start_queue(dev);

		spin_unlock_irqrestore(&vptr->lock, flags);

		velocity_free_rings(tmp_vptr);

out_free_tmp_vptr_1:
		kfree(tmp_vptr);
	}
out_0:
	return ret;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 *	velocity_poll_controller	-	Velocity Poll controller function
 *	@dev: network device
 *
 *	Used by NETCONSOLE and other diagnostic tools to allow network
 *	I/O with interrupts disabled.
 */
static void velocity_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	velocity_intr(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 *	velocity_mii_ioctl		-	MII ioctl handler
 *	@dev: network device
 *	@ifr: the ifreq block for the ioctl
 *	@cmd: the command
 *
 *	Process MII requests made via ioctl from the network layer. These
 *	are used by tools like kudzu to interrogate the link state of the
 *	hardware
 */
static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct velocity_info *vptr = netdev_priv(dev);
	struct mac_regs __iomem *regs = vptr->mac_regs;
	unsigned long flags;
	struct mii_ioctl_data *miidata = if_mii(ifr);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
		break;
	case SIOCGMIIREG:
		if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
			return -ETIMEDOUT;
		break;
	case SIOCSMIIREG:
		spin_lock_irqsave(&vptr->lock, flags);
		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
		spin_unlock_irqrestore(&vptr->lock, flags);
		check_connection_type(vptr->mac_regs);
		if (err)
			return err;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/**
 *	velocity_ioctl		-	ioctl entry point
 *	@dev: network device
 *	@rq: interface request ioctl
 *	@cmd: command code
 *
 *	Called when the user issues an ioctl request to the network
 *	device in question. The velocity interface supports MII.
 */
static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int ret;

	/* If we are asked for information and the device is power
	   saving then we need to bring the device back up to talk to it */
	if (!netif_running(dev))
		velocity_set_power_state(vptr, PCI_D0);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		ret = velocity_mii_ioctl(dev, rq, cmd);
		break;

	default:
		ret = -EOPNOTSUPP;
	}
	if (!netif_running(dev))
		velocity_set_power_state(vptr, PCI_D3hot);

	return ret;
}

/**
 *	velocity_get_stats	-	statistics callback
 *	@dev: network device
 *
 *	Callback from the network layer to allow driver statistics
 *	to be resynchronized with hardware collected state. In the
 *	case of the velocity we need to pull the MIB counters from
 *	the hardware into the counters before letting the network
 *	layer display them.
 */
static struct net_device_stats *velocity_get_stats(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);

	/* If the hardware is down, don't touch MII */
	if (!netif_running(dev))
		return &dev->stats;

	spin_lock_irq(&vptr->lock);
	velocity_update_hw_mibs(vptr);
	spin_unlock_irq(&vptr->lock);

	dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
	dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
	dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];

	dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];

	/* detailed rx_errors */
	dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];

	/*
	 * The remaining rx/tx error counters (rx_over_errors,
	 * rx_frame_errors, rx_fifo_errors, rx_missed_errors,
	 * tx_fifo_errors, ...) are not filled in from the hardware
	 * MIBs here.
	 */

	return &dev->stats;
}

/**
 *	velocity_close		-	close adapter callback
 *	@dev: network device
 *
 *	Callback from the network layer when the velocity is being
 *	deactivated by the network layer
 */
static int velocity_close(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);

	napi_disable(&vptr->napi);
	netif_stop_queue(dev);
	velocity_shutdown(vptr);

	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
		velocity_get_ip(vptr);

	free_irq(dev->irq, dev);

	velocity_free_rings(vptr);

	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
	return 0;
}

/**
 *	velocity_xmit		-	transmit packet callback
 *	@skb: buffer to transmit
 *	@dev: network device
 *
 *	Called by the network layer to request a packet is queued to
 *	the velocity. Returns zero on success.
 */
static netdev_tx_t velocity_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int qnum = 0;
	struct tx_desc *td_ptr;
	struct velocity_td_info *tdinfo;
	unsigned long flags;
	int pktlen;
	int index, prev;
	int i = 0;

	if (skb_padto(skb, ETH_ZLEN))
		goto out;

	/* The hardware can handle at most 7 memory segments, so merge
	 * the skb if there are more */
	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pktlen = skb_shinfo(skb)->nr_frags == 0 ?
			max_t(unsigned int, skb->len, ETH_ZLEN) :
			skb_headlen(skb);

	spin_lock_irqsave(&vptr->lock, flags);

	index = vptr->tx.curr[qnum];
	td_ptr = &(vptr->tx.rings[qnum][index]);
	tdinfo = &(vptr->tx.infos[qnum][index]);

	td_ptr->tdesc1.TCR = TCR0_TIC;
	td_ptr->td_buf[0].size &= ~TD_QUEUE;

	/*
	 *	Map the linear network buffer into PCI space and
	 *	add it to the transmit ring.
	 */
	tdinfo->skb = skb;
	tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
					    DMA_TO_DEVICE);
	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
	td_ptr->td_buf[0].pa_high = 0;
	td_ptr->td_buf[0].size = cpu_to_le16(pktlen);

	/* Handle fragments */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
							  frag, 0,
							  skb_frag_size(frag),
							  DMA_TO_DEVICE);

		td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
		td_ptr->td_buf[i + 1].pa_high = 0;
		td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
	}
	tdinfo->nskb_dma = i + 1;

	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;

	if (skb_vlan_tag_present(skb)) {
		td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
		td_ptr->tdesc1.TCR |= TCR0_VETAG;
	}

	/*
	 *	Handle hardware checksum
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);
		if (ip->protocol == IPPROTO_TCP)
			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
		else if (ip->protocol == IPPROTO_UDP)
			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
		td_ptr->tdesc1.TCR |= TCR0_IPCK;
	}

	prev = index - 1;
	if (prev < 0)
		prev = vptr->options.numtx - 1;
	td_ptr->tdesc0.len |= OWNED_BY_NIC;
	vptr->tx.used[qnum]++;
	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;

	if (AVAIL_TD(vptr, qnum) < 1)
		netif_stop_queue(dev);

	td_ptr = &(vptr->tx.rings[qnum][prev]);
	td_ptr->td_buf[0].size |= TD_QUEUE;
	mac_tx_queue_wake(vptr->mac_regs, qnum);

	spin_unlock_irqrestore(&vptr->lock, flags);
out:
	return NETDEV_TX_OK;
}
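
/*
 * Descriptor chaining note for velocity_xmit(): a new descriptor is
 * first queued with TD_QUEUE clear in its own buffer 0 (so the chip
 * stops after it), and only then is TD_QUEUE set on the *previous*
 * descriptor before the queue is kicked.  This ordering means the
 * hardware never races past a descriptor whose successor is still
 * being built.
 */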

static const struct net_device_ops velocity_netdev_ops = {
	.ndo_open		= velocity_open,
	.ndo_stop		= velocity_close,
	.ndo_start_xmit		= velocity_xmit,
	.ndo_get_stats		= velocity_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_rx_mode	= velocity_set_multi,
	.ndo_change_mtu		= velocity_change_mtu,
	.ndo_do_ioctl		= velocity_ioctl,
	.ndo_vlan_rx_add_vid	= velocity_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= velocity_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= velocity_poll_controller,
#endif
};

/**
 *	velocity_init_info	-	init private data
 *	@vptr: Velocity info
 *	@info: Board type
 *
 *	Set up the initial velocity_info struct for the device that has been
 *	discovered.
 */
static void velocity_init_info(struct velocity_info *vptr,
			       const struct velocity_info_tbl *info)
{
	vptr->chip_id = info->chip_id;
	vptr->tx.numq = info->txqueue;
	vptr->multicast_limit = MCAM_SIZE;
	spin_lock_init(&vptr->lock);
}

/**
 *	velocity_get_pci_info	-	retrieve PCI info for device
 *	@vptr: velocity device
 *
 *	Retrieve the PCI configuration space data that interests us from
 *	the kernel PCI layer
 */
static int velocity_get_pci_info(struct velocity_info *vptr)
{
	struct pci_dev *pdev = vptr->pdev;

	pci_set_master(pdev);

	vptr->ioaddr = pci_resource_start(pdev, 0);
	vptr->memaddr = pci_resource_start(pdev, 1);

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
		dev_err(&pdev->dev,
			"region #0 is not an I/O resource, aborting.\n");
		return -EINVAL;
	}

	if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
		dev_err(&pdev->dev,
			"region #1 is an I/O resource, aborting.\n");
		return -EINVAL;
	}

	if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
		dev_err(&pdev->dev, "region #1 is too small.\n");
		return -EINVAL;
	}

	return 0;
}
2700
/**
 *	velocity_get_platform_info - retrieve platform info for device
 *	@vptr: velocity device
 *
 *	Retrieve the platform configuration data that interests us
 */
2707static int velocity_get_platform_info(struct velocity_info *vptr)
2708{
2709 struct resource res;
2710 int ret;
2711
2712 if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
2713 vptr->no_eeprom = 1;
2714
2715 ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
2716 if (ret) {
2717 dev_err(vptr->dev, "unable to find memory address\n");
2718 return ret;
2719 }
2720
2721 vptr->memaddr = res.start;
2722
2723 if (resource_size(&res) < VELOCITY_IO_SIZE) {
2724 dev_err(vptr->dev, "memory region is too small.\n");
2725 return -EINVAL;
2726 }
2727
2728 return 0;
2729}
2730
/**
 *	velocity_print_info	-	per driver data
 *	@vptr: velocity
 *
 *	Print per driver data as the kernel driver finds Velocity
 *	hardware
 */
2738static void velocity_print_info(struct velocity_info *vptr)
2739{
2740 netdev_info(vptr->netdev, "%s - Ethernet Address: %pM\n",
2741 get_chip_name(vptr->chip_id), vptr->netdev->dev_addr);
2742}
2743
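/**
 *	velocity_get_link	-	link status callback
 *	@dev: network device
 *
 *	Report the current link state by reading the PHYSR0 register.
 */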
2744static u32 velocity_get_link(struct net_device *dev)
2745{
2746 struct velocity_info *vptr = netdev_priv(dev);
2747 struct mac_regs __iomem *regs = vptr->mac_regs;
	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2749}
2750
/**
 *	velocity_probe - set up discovered velocity device
 *	@dev: device to probe
 *	@irq: interrupt number for the device
 *	@info: table entry describing the chip
 *	@bustype: bus the device sits behind (PCI or platform)
 *
 *	Configure a discovered adapter from scratch. Return a negative
 *	errno error code on failure paths.
 */
2761static int velocity_probe(struct device *dev, int irq,
2762 const struct velocity_info_tbl *info,
2763 enum velocity_bus_type bustype)
2764{
2765 struct net_device *netdev;
2766 int i;
2767 struct velocity_info *vptr;
2768 struct mac_regs __iomem *regs;
2769 int ret = -ENOMEM;
2770
	/* FIXME: this driver, like almost all other ethernet drivers,
	 * can support more than MAX_UNITS.
	 */
2774 if (velocity_nics >= MAX_UNITS) {
2775 dev_notice(dev, "already found %d NICs.\n", velocity_nics);
2776 return -ENODEV;
2777 }
2778
2779 netdev = alloc_etherdev(sizeof(struct velocity_info));
2780 if (!netdev)
2781 goto out;
2782
	/* Chain it all together */

2785 SET_NETDEV_DEV(netdev, dev);
2786 vptr = netdev_priv(netdev);
2787
2788 pr_info_once("%s Ver. %s\n", VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2789 pr_info_once("Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2790 pr_info_once("Copyright (c) 2004 Red Hat Inc.\n");
2791
2792 netdev->irq = irq;
2793 vptr->netdev = netdev;
2794 vptr->dev = dev;
2795
2796 velocity_init_info(vptr, info);
2797
2798 if (bustype == BUS_PCI) {
2799 vptr->pdev = to_pci_dev(dev);
2800
2801 ret = velocity_get_pci_info(vptr);
2802 if (ret < 0)
2803 goto err_free_dev;
2804 } else {
2805 vptr->pdev = NULL;
2806 ret = velocity_get_platform_info(vptr);
2807 if (ret < 0)
2808 goto err_free_dev;
2809 }
2810
2811 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2812 if (regs == NULL) {
2813 ret = -EIO;
2814 goto err_free_dev;
2815 }
2816
2817 vptr->mac_regs = regs;
	vptr->rev_id = readb(&regs->rev_id);
2819
2820 mac_wol_reset(regs);
2821
2822 for (i = 0; i < 6; i++)
		netdev->dev_addr[i] = readb(&regs->PAR[i]);
2824
2825
2826 velocity_get_options(&vptr->options, velocity_nics);
2827
	/*
	 *	Mask out the options that cannot be set to the chip
	 */

2832 vptr->options.flags &= info->flags;
2833
	/*
	 *	Enable the chip-specified capabilities
	 */

2838 vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2839
2840 vptr->wol_opts = vptr->options.wol_opts;
2841 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2842
2843 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2844
2845 netdev->netdev_ops = &velocity_netdev_ops;
2846 netdev->ethtool_ops = &velocity_ethtool_ops;
2847 netif_napi_add(netdev, &vptr->napi, velocity_poll,
2848 VELOCITY_NAPI_WEIGHT);
2849
2850 netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2851 NETIF_F_HW_VLAN_CTAG_TX;
2852 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2853 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
2854 NETIF_F_IP_CSUM;
2855
	/* MTU range: 64 - 9000 */
2857 netdev->min_mtu = VELOCITY_MIN_MTU;
2858 netdev->max_mtu = VELOCITY_MAX_MTU;
2859
2860 ret = register_netdev(netdev);
2861 if (ret < 0)
2862 goto err_iounmap;
2863
2864 if (!velocity_get_link(netdev)) {
2865 netif_carrier_off(netdev);
2866 vptr->mii_status |= VELOCITY_LINK_FAIL;
2867 }
2868
2869 velocity_print_info(vptr);
2870 dev_set_drvdata(vptr->dev, netdev);
2871
	/* and leave the chip powered down */

2874 velocity_set_power_state(vptr, PCI_D3hot);
2875 velocity_nics++;
2876out:
2877 return ret;
2878
2879err_iounmap:
2880 netif_napi_del(&vptr->napi);
2881 iounmap(regs);
2882err_free_dev:
2883 free_netdev(netdev);
2884 goto out;
2885}
2886
/**
 *	velocity_remove	-	device unplug
 *	@dev: device being removed
 *
 *	Device unload callback. Called on an unplug or on module
 *	unload for each active device that is present. Disconnects
 *	the device from the network layer and frees all the resources.
 */
2895static int velocity_remove(struct device *dev)
2896{
2897 struct net_device *netdev = dev_get_drvdata(dev);
2898 struct velocity_info *vptr = netdev_priv(netdev);
2899
2900 unregister_netdev(netdev);
2901 netif_napi_del(&vptr->napi);
2902 iounmap(vptr->mac_regs);
2903 free_netdev(netdev);
2904 velocity_nics--;
2905
2906 return 0;
2907}
2908
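/**
 *	velocity_pci_probe	-	PCI probe glue
 *	@pdev: PCI device being probed
 *	@ent: matching table entry
 *
 *	Enable the device and claim its regions, then hand off to the
 *	bus-independent probe.
 */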
2909static int velocity_pci_probe(struct pci_dev *pdev,
2910 const struct pci_device_id *ent)
2911{
2912 const struct velocity_info_tbl *info =
2913 &chip_info_table[ent->driver_data];
2914 int ret;
2915
2916 ret = pci_enable_device(pdev);
2917 if (ret < 0)
2918 return ret;
2919
2920 ret = pci_request_regions(pdev, VELOCITY_NAME);
2921 if (ret < 0) {
2922 dev_err(&pdev->dev, "No PCI resources.\n");
2923 goto fail1;
2924 }
2925
2926 ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
2927 if (ret == 0)
2928 return 0;
2929
2930 pci_release_regions(pdev);
2931fail1:
2932 pci_disable_device(pdev);
2933 return ret;
2934}
2935
2936static void velocity_pci_remove(struct pci_dev *pdev)
2937{
2938 velocity_remove(&pdev->dev);
2939
2940 pci_release_regions(pdev);
2941 pci_disable_device(pdev);
2942}
2943
2944static int velocity_platform_probe(struct platform_device *pdev)
2945{
2946 const struct of_device_id *of_id;
2947 const struct velocity_info_tbl *info;
2948 int irq;
2949
2950 of_id = of_match_device(velocity_of_ids, &pdev->dev);
2951 if (!of_id)
2952 return -EINVAL;
2953 info = of_id->data;
2954
2955 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
2956 if (!irq)
2957 return -EINVAL;
2958
2959 return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
2960}
2961
2962static int velocity_platform_remove(struct platform_device *pdev)
2963{
2964 velocity_remove(&pdev->dev);
2965
2966 return 0;
2967}
2968
2969#ifdef CONFIG_PM_SLEEP
/**
 *	wol_calc_crc		-	WOL CRC
 *	@size: size of the wake mask
 *	@pattern: data pattern
 *	@mask_pattern: mask
 *
 *	Compute the wake on lan CRC hashes for the packet header
 *	we are interested in.
 */
2979static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2980{
2981 u16 crc = 0xFFFF;
2982 u8 mask;
2983 int i, j;
2984
2985 for (i = 0; i < size; i++) {
2986 mask = mask_pattern[i];
2987
		/* Skip this loop if the mask equals to zero */
2989 if (mask == 0x00)
2990 continue;
2991
2992 for (j = 0; j < 8; j++) {
2993 if ((mask & 0x01) == 0) {
2994 mask >>= 1;
2995 continue;
2996 }
2997 mask >>= 1;
2998 crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
2999 }
3000 }
3001
	/* Finally, invert the result once to get the correct data */
	crc = ~crc;
3003 return bitrev32(crc) >> 16;
3004}
3005
/**
 *	velocity_set_wol	-	set up for wake on lan
 *	@vptr: velocity to set WOL status on
 *
 *	Set a card up for wake on lan either by unicast or by
 *	ARP packet.
 *
 *	FIXME: check static buffer is safe here
 */
3015static int velocity_set_wol(struct velocity_info *vptr)
3016{
3017 struct mac_regs __iomem *regs = vptr->mac_regs;
3018 enum speed_opt spd_dpx = vptr->options.spd_dpx;
3019 static u8 buf[256];
3020 int i;
3021
	static u32 mask_pattern[2][4] = {
		{0x00203000, 0x000003C0, 0x00000000, 0x00000000},	/* ARP */
		{0xfffff000, 0xffffffff, 0xffffffff, 0x0000ffff}	/* Magic Packet */
	};
3026
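	/* Clear any stale wake status and arm magic-packet wake as the
	 * baseline; unicast and ARP wake are added below as configured. */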
	writew(0xFFFF, &regs->WOLCRClr);
	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
3030
	/*
	 *	if (vptr->wol_opts & VELOCITY_WOL_PHY)
	 *		writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
	 */
3036 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
3038
3039 if (vptr->wol_opts & VELOCITY_WOL_ARP) {
3040 struct arp_packet *arp = (struct arp_packet *) buf;
3041 u16 crc;
3042 memset(buf, 0, sizeof(struct arp_packet) + 7);
3043
3044 for (i = 0; i < 4; i++)
			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
3046
3047 arp->type = htons(ETH_P_ARP);
3048 arp->ar_op = htons(1);
3049
3050 memcpy(arp->ar_tip, vptr->ip_addr, 4);
3051
		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
				   (u8 *)&mask_pattern[0][0]);
3054
		writew(crc, &regs->PatternCRC[0]);
		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
3057 }
3058
	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);

	writew(0x0FFF, &regs->WOLSRClr);
3063
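	/* Unless gigabit was forced, drop the advertised link rate for
	 * the wake state; a forced non-auto setting skips the advertising
	 * changes entirely. */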
3064 if (spd_dpx == SPD_DPX_1000_FULL)
3065 goto mac_done;
3066
3067 if (spd_dpx != SPD_DPX_AUTO)
3068 goto advertise_done;
3069
3070 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
3071 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
3072 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
3073
3074 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
3075 }
3076
3077 if (vptr->mii_status & VELOCITY_SPEED_1000)
3078 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
3079
advertise_done:
	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

	{
		u8 GCR;
		/* clear FCGMII to force MII mode, and force full duplex */
		GCR = readb(&regs->CHIPGCR);
		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
		writeb(GCR, &regs->CHIPGCR);
	}

mac_done:
	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
	/* Turn on SWPTAG just before entering power mode */
	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
	/* Go to bed ..... */
	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
3096
3097 return 0;
3098}
3099
/**
 *	velocity_save_context	-	save registers
 *	@vptr: velocity
 *	@context: buffer for stored context
 *
 *	Retrieve the current configuration from the velocity hardware
 *	and stash it in the context structure, for use by the context
 *	restore functions. This allows us to save things we need across
 *	power down states.
 */
3110static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
3111{
3112 struct mac_regs __iomem *regs = vptr->mac_regs;
3113 u16 i;
3114 u8 __iomem *ptr = (u8 __iomem *)regs;
3115
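	/* Copy out the register windows that must survive the powered
	 * down state: chip configuration, multicast setup and the DMA
	 * ring pointers. */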
3116 for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3117 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3118
3119 for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3120 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3121
3122 for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3123 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3124
3125}
3126
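/**
 *	velocity_suspend	-	suspend hook
 *	@dev: device being suspended
 *
 *	Quiesce the chip. If wake on lan is enabled, save the context
 *	and arm the wake patterns before entering D3hot; otherwise the
 *	device is simply powered down.
 */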
3127static int velocity_suspend(struct device *dev)
3128{
3129 struct net_device *netdev = dev_get_drvdata(dev);
3130 struct velocity_info *vptr = netdev_priv(netdev);
3131 unsigned long flags;
3132
3133 if (!netif_running(vptr->netdev))
3134 return 0;
3135
3136 netif_device_detach(vptr->netdev);
3137
3138 spin_lock_irqsave(&vptr->lock, flags);
3139 if (vptr->pdev)
3140 pci_save_state(vptr->pdev);
3141
3142 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3143 velocity_get_ip(vptr);
3144 velocity_save_context(vptr, &vptr->context);
3145 velocity_shutdown(vptr);
3146 velocity_set_wol(vptr);
3147 if (vptr->pdev)
3148 pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
3149 velocity_set_power_state(vptr, PCI_D3hot);
3150 } else {
3151 velocity_save_context(vptr, &vptr->context);
3152 velocity_shutdown(vptr);
3153 if (vptr->pdev)
3154 pci_disable_device(vptr->pdev);
3155 velocity_set_power_state(vptr, PCI_D3hot);
3156 }
3157
3158 spin_unlock_irqrestore(&vptr->lock, flags);
3159 return 0;
3160}
3161
/**
 *	velocity_restore_context	-	restore registers
 *	@vptr: velocity
 *	@context: buffer for stored context
 *
 *	Reload the register configuration from the velocity context
 *	created by velocity_save_context.
 */
3170static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3171{
3172 struct mac_regs __iomem *regs = vptr->mac_regs;
3173 int i;
3174 u8 __iomem *ptr = (u8 __iomem *)regs;
3175
3176 for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3177 writel(*((u32 *) (context->mac_reg + i)), ptr + i);

	/* Just skip cr0 */
	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
		/* Clear */
		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
		/* Set */
		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3185 }
3186
3187 for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3188 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3189
3190 for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3191 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3192
3193 for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3194 writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3195}
3196
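/**
 *	velocity_resume	-	restore from suspend
 *	@dev: device being resumed
 *
 *	Return the chip to D0, restore the saved register context,
 *	reinitialise the hardware and restart any transmits that were
 *	pending when we suspended.
 */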
3197static int velocity_resume(struct device *dev)
3198{
3199 struct net_device *netdev = dev_get_drvdata(dev);
3200 struct velocity_info *vptr = netdev_priv(netdev);
3201 unsigned long flags;
3202 int i;
3203
3204 if (!netif_running(vptr->netdev))
3205 return 0;
3206
3207 velocity_set_power_state(vptr, PCI_D0);
3208
3209 if (vptr->pdev) {
3210 pci_enable_wake(vptr->pdev, PCI_D0, 0);
3211 pci_restore_state(vptr->pdev);
3212 }
3213
3214 mac_wol_reset(vptr->mac_regs);
3215
3216 spin_lock_irqsave(&vptr->lock, flags);
3217 velocity_restore_context(vptr, &vptr->context);
3218 velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3219 mac_disable_int(vptr->mac_regs);
3220
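	/* reap anything that completed before suspend and kick any
	 * queues that still hold work */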
3221 velocity_tx_srv(vptr);
3222
3223 for (i = 0; i < vptr->tx.numq; i++) {
3224 if (vptr->tx.used[i])
3225 mac_tx_queue_wake(vptr->mac_regs, i);
3226 }
3227
3228 mac_enable_int(vptr->mac_regs);
3229 spin_unlock_irqrestore(&vptr->lock, flags);
3230 netif_device_attach(vptr->netdev);
3231
3232 return 0;
3233}
3234#endif
3235
3236static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
3237
/*
 *	Definition for our device driver. The PCI layer interface
 *	uses this to handle all our card discovery and plugging.
 */
3242static struct pci_driver velocity_pci_driver = {
3243 .name = VELOCITY_NAME,
3244 .id_table = velocity_pci_id_table,
3245 .probe = velocity_pci_probe,
3246 .remove = velocity_pci_remove,
3247 .driver = {
3248 .pm = &velocity_pm_ops,
3249 },
3250};
3251
3252static struct platform_driver velocity_platform_driver = {
3253 .probe = velocity_platform_probe,
3254 .remove = velocity_platform_remove,
3255 .driver = {
3256 .name = "via-velocity",
3257 .of_match_table = velocity_of_ids,
3258 .pm = &velocity_pm_ops,
3259 },
3260};
3261
/**
 *	velocity_ethtool_up	-	pre hook for ethtool
 *	@dev: network device
 *
 *	Called before an ethtool operation. We need to make sure the
 *	chip is out of D3 state before we poke at it. In case of nested
 *	ethtool ops, only the outermost call powers the device up.
 */
3270static int velocity_ethtool_up(struct net_device *dev)
3271{
3272 struct velocity_info *vptr = netdev_priv(dev);
3273
3274 if (vptr->ethtool_ops_nesting == U32_MAX)
3275 return -EBUSY;
3276 if (!vptr->ethtool_ops_nesting++ && !netif_running(dev))
3277 velocity_set_power_state(vptr, PCI_D0);
3278 return 0;
3279}
3280
/**
 *	velocity_ethtool_down	-	post hook for ethtool
 *	@dev: network device
 *
 *	Called after an ethtool operation. Restore the chip back to D3
 *	state if it isn't running. In case of nested ethtool ops, only
 *	the outermost call powers the device back down.
 */
3289static void velocity_ethtool_down(struct net_device *dev)
3290{
3291 struct velocity_info *vptr = netdev_priv(dev);
3292
3293 if (!--vptr->ethtool_ops_nesting && !netif_running(dev))
3294 velocity_set_power_state(vptr, PCI_D3hot);
3295}
3296
3297static int velocity_get_link_ksettings(struct net_device *dev,
3298 struct ethtool_link_ksettings *cmd)
3299{
3300 struct velocity_info *vptr = netdev_priv(dev);
3301 struct mac_regs __iomem *regs = vptr->mac_regs;
3302 u32 status;
3303 u32 supported, advertising;
3304
3305 status = check_connection_type(vptr->mac_regs);
3306
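	/* The hardware can always do 10/100/1000 over twisted pair; what
	 * we advertise depends on whether a speed/duplex was forced. */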
3307 supported = SUPPORTED_TP |
3308 SUPPORTED_Autoneg |
3309 SUPPORTED_10baseT_Half |
3310 SUPPORTED_10baseT_Full |
3311 SUPPORTED_100baseT_Half |
3312 SUPPORTED_100baseT_Full |
3313 SUPPORTED_1000baseT_Half |
3314 SUPPORTED_1000baseT_Full;
3315
3316 advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3317 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3318 advertising |=
3319 ADVERTISED_10baseT_Half |
3320 ADVERTISED_10baseT_Full |
3321 ADVERTISED_100baseT_Half |
3322 ADVERTISED_100baseT_Full |
3323 ADVERTISED_1000baseT_Half |
3324 ADVERTISED_1000baseT_Full;
3325 } else {
3326 switch (vptr->options.spd_dpx) {
3327 case SPD_DPX_1000_FULL:
3328 advertising |= ADVERTISED_1000baseT_Full;
3329 break;
3330 case SPD_DPX_100_HALF:
3331 advertising |= ADVERTISED_100baseT_Half;
3332 break;
3333 case SPD_DPX_100_FULL:
3334 advertising |= ADVERTISED_100baseT_Full;
3335 break;
3336 case SPD_DPX_10_HALF:
3337 advertising |= ADVERTISED_10baseT_Half;
3338 break;
3339 case SPD_DPX_10_FULL:
3340 advertising |= ADVERTISED_10baseT_Full;
3341 break;
3342 default:
3343 break;
3344 }
3345 }
3346
3347 if (status & VELOCITY_SPEED_1000)
3348 cmd->base.speed = SPEED_1000;
3349 else if (status & VELOCITY_SPEED_100)
3350 cmd->base.speed = SPEED_100;
3351 else
3352 cmd->base.speed = SPEED_10;
3353
3354 cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
3355 AUTONEG_ENABLE : AUTONEG_DISABLE;
3356 cmd->base.port = PORT_TP;
	cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
3358
3359 if (status & VELOCITY_DUPLEX_FULL)
3360 cmd->base.duplex = DUPLEX_FULL;
3361 else
3362 cmd->base.duplex = DUPLEX_HALF;
3363
3364 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3365 supported);
3366 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3367 advertising);
3368
3369 return 0;
3370}
3371
3372static int velocity_set_link_ksettings(struct net_device *dev,
3373 const struct ethtool_link_ksettings *cmd)
3374{
3375 struct velocity_info *vptr = netdev_priv(dev);
3376 u32 speed = cmd->base.speed;
3377 u32 curr_status;
3378 u32 new_status = 0;
3379 int ret = 0;
3380
3381 curr_status = check_connection_type(vptr->mac_regs);
3382 curr_status &= (~VELOCITY_LINK_FAIL);
3383
3384 new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3385 new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3386 new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3387 new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3388 new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
3389 VELOCITY_DUPLEX_FULL : 0);
3390
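	/* Autoneg may only be requested if it matches the current state;
	 * otherwise map the forced speed/duplex onto a supported mode. */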
3391 if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3392 (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3393 ret = -EINVAL;
3394 } else {
3395 enum speed_opt spd_dpx;
3396
3397 if (new_status & VELOCITY_AUTONEG_ENABLE)
3398 spd_dpx = SPD_DPX_AUTO;
3399 else if ((new_status & VELOCITY_SPEED_1000) &&
3400 (new_status & VELOCITY_DUPLEX_FULL)) {
3401 spd_dpx = SPD_DPX_1000_FULL;
3402 } else if (new_status & VELOCITY_SPEED_100)
3403 spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3404 SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3405 else if (new_status & VELOCITY_SPEED_10)
3406 spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3407 SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3408 else
3409 return -EOPNOTSUPP;
3410
3411 vptr->options.spd_dpx = spd_dpx;
3412
3413 velocity_set_media_mode(vptr, new_status);
3414 }
3415
3416 return ret;
3417}
3418
3419static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3420{
3421 struct velocity_info *vptr = netdev_priv(dev);
3422
3423 strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3424 strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3425 if (vptr->pdev)
3426 strlcpy(info->bus_info, pci_name(vptr->pdev),
3427 sizeof(info->bus_info));
3428 else
3429 strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
3430}
3431
3432static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3433{
3434 struct velocity_info *vptr = netdev_priv(dev);
3435 wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3436 wol->wolopts |= WAKE_MAGIC;
	/*
	 *	if (vptr->wol_opts & VELOCITY_WOL_PHY)
	 *		wol->wolopts |= WAKE_PHY;
	 */
3441 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3442 wol->wolopts |= WAKE_UCAST;
3443 if (vptr->wol_opts & VELOCITY_WOL_ARP)
3444 wol->wolopts |= WAKE_ARP;
3445 memcpy(&wol->sopass, vptr->wol_passwd, 6);
3446}
3447
3448static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3449{
3450 struct velocity_info *vptr = netdev_priv(dev);
3451
3452 if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3453 return -EFAULT;
3454 vptr->wol_opts = VELOCITY_WOL_MAGIC;
	/*
	 *	if (wol->wolopts & WAKE_PHY) {
	 *		vptr->wol_opts |= VELOCITY_WOL_PHY;
	 *		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	 *	}
	 */
3462
3463 if (wol->wolopts & WAKE_MAGIC) {
3464 vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3465 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3466 }
3467 if (wol->wolopts & WAKE_UCAST) {
3468 vptr->wol_opts |= VELOCITY_WOL_UCAST;
3469 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3470 }
3471 if (wol->wolopts & WAKE_ARP) {
3472 vptr->wol_opts |= VELOCITY_WOL_ARP;
3473 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3474 }
3475 memcpy(vptr->wol_passwd, wol->sopass, 6);
3476 return 0;
3477}
3478
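/*
 * The RX/TX interrupt-pending timers are encoded as a 6-bit count in
 * bits 5:0 with a prescaler selected by bits 7:6 (0 = x1, 1 = x4,
 * 2 = x16, 3 = x64), giving a usable range of 0 to 0x3f * 64 us.
 */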
3479static int get_pending_timer_val(int val)
3480{
3481 int mult_bits = val >> 6;
3482 int mult = 1;
3483
	switch (mult_bits) {
	case 1:
		mult = 4;
		break;
	case 2:
		mult = 16;
		break;
	case 3:
		mult = 64;
		break;
	case 0:
	default:
		break;
	}
3496
3497 return (val & 0x3f) * mult;
3498}
3499
3500static void set_pending_timer_val(int *val, u32 us)
3501{
3502 u8 mult = 0;
3503 u8 shift = 0;
3504
3505 if (us >= 0x3f) {
3506 mult = 1;
3507 shift = 2;
3508 }
3509 if (us >= 0x3f * 4) {
3510 mult = 2;
3511 shift = 4;
3512 }
3513 if (us >= 0x3f * 16) {
3514 mult = 3;
3515 shift = 6;
3516 }
3517
3518 *val = (mult << 6) | ((us >> shift) & 0x3f);
3519}
3520
3521
3522static int velocity_get_coalesce(struct net_device *dev,
3523 struct ethtool_coalesce *ecmd)
3524{
3525 struct velocity_info *vptr = netdev_priv(dev);
3526
3527 ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3528 ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3529
3530 ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3531 ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3532
3533 return 0;
3534}
3535
3536static int velocity_set_coalesce(struct net_device *dev,
3537 struct ethtool_coalesce *ecmd)
3538{
3539 struct velocity_info *vptr = netdev_priv(dev);
3540 int max_us = 0x3f * 64;
3541 unsigned long flags;
3542
	/* a 6-bit count with a x1/x4/x16/x64 prescaler bounds the timers */
3544 if (ecmd->tx_coalesce_usecs > max_us)
3545 return -EINVAL;
3546 if (ecmd->rx_coalesce_usecs > max_us)
3547 return -EINVAL;
3548
3549 if (ecmd->tx_max_coalesced_frames > 0xff)
3550 return -EINVAL;
3551 if (ecmd->rx_max_coalesced_frames > 0xff)
3552 return -EINVAL;
3553
3554 vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3555 vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3556
3557 set_pending_timer_val(&vptr->options.rxqueue_timer,
3558 ecmd->rx_coalesce_usecs);
3559 set_pending_timer_val(&vptr->options.txqueue_timer,
3560 ecmd->tx_coalesce_usecs);
3561
	/* reprogram the suppression thresholds and timers with interrupts masked */
3563 spin_lock_irqsave(&vptr->lock, flags);
3564 mac_disable_int(vptr->mac_regs);
3565 setup_adaptive_interrupts(vptr);
3566 setup_queue_timers(vptr);
3567
3568 mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3569 mac_clear_isr(vptr->mac_regs);
3570 mac_enable_int(vptr->mac_regs);
3571 spin_unlock_irqrestore(&vptr->lock, flags);
3572
3573 return 0;
3574}
3575
3576static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3577 "rx_all",
3578 "rx_ok",
3579 "tx_ok",
3580 "rx_error",
3581 "rx_runt_ok",
3582 "rx_runt_err",
3583 "rx_64",
3584 "tx_64",
3585 "rx_65_to_127",
3586 "tx_65_to_127",
3587 "rx_128_to_255",
3588 "tx_128_to_255",
3589 "rx_256_to_511",
3590 "tx_256_to_511",
3591 "rx_512_to_1023",
3592 "tx_512_to_1023",
3593 "rx_1024_to_1518",
3594 "tx_1024_to_1518",
3595 "tx_ether_collisions",
3596 "rx_crc_errors",
3597 "rx_jumbo",
3598 "tx_jumbo",
3599 "rx_mac_control_frames",
3600 "tx_mac_control_frames",
3601 "rx_frame_alignment_errors",
3602 "rx_long_ok",
3603 "rx_long_err",
3604 "tx_sqe_errors",
3605 "rx_no_buf",
3606 "rx_symbol_errors",
3607 "in_range_length_errors",
3608 "late_collisions"
3609};
3610
3611static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
3612{
3613 switch (sset) {
3614 case ETH_SS_STATS:
3615 memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
3616 break;
3617 }
3618}
3619
3620static int velocity_get_sset_count(struct net_device *dev, int sset)
3621{
3622 switch (sset) {
3623 case ETH_SS_STATS:
3624 return ARRAY_SIZE(velocity_gstrings);
3625 default:
3626 return -EOPNOTSUPP;
3627 }
3628}
3629
3630static void velocity_get_ethtool_stats(struct net_device *dev,
3631 struct ethtool_stats *stats, u64 *data)
3632{
3633 if (netif_running(dev)) {
3634 struct velocity_info *vptr = netdev_priv(dev);
3635 u32 *p = vptr->mib_counter;
3636 int i;
3637
3638 spin_lock_irq(&vptr->lock);
3639 velocity_update_hw_mibs(vptr);
3640 spin_unlock_irq(&vptr->lock);
3641
3642 for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
3643 *data++ = *p++;
3644 }
3645}
3646
3647static const struct ethtool_ops velocity_ethtool_ops = {
3648 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3649 ETHTOOL_COALESCE_MAX_FRAMES,
3650 .get_drvinfo = velocity_get_drvinfo,
3651 .get_wol = velocity_ethtool_get_wol,
3652 .set_wol = velocity_ethtool_set_wol,
3653 .get_link = velocity_get_link,
3654 .get_strings = velocity_get_strings,
3655 .get_sset_count = velocity_get_sset_count,
3656 .get_ethtool_stats = velocity_get_ethtool_stats,
3657 .get_coalesce = velocity_get_coalesce,
3658 .set_coalesce = velocity_set_coalesce,
3659 .begin = velocity_ethtool_up,
3660 .complete = velocity_ethtool_down,
3661 .get_link_ksettings = velocity_get_link_ksettings,
3662 .set_link_ksettings = velocity_set_link_ksettings,
3663};
3664
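/*
 * Track IPv4 address changes so the cached address used to build the
 * ARP wake pattern in velocity_set_wol stays current. Only needed when
 * power management and INET are configured.
 */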
3665#if defined(CONFIG_PM) && defined(CONFIG_INET)
3666static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3667{
3668 struct in_ifaddr *ifa = ptr;
3669 struct net_device *dev = ifa->ifa_dev->dev;
3670
3671 if (dev_net(dev) == &init_net &&
3672 dev->netdev_ops == &velocity_netdev_ops)
3673 velocity_get_ip(netdev_priv(dev));
3674
3675 return NOTIFY_DONE;
3676}
3677
3678static struct notifier_block velocity_inetaddr_notifier = {
3679 .notifier_call = velocity_netdev_event,
3680};
3681
3682static void velocity_register_notifier(void)
3683{
3684 register_inetaddr_notifier(&velocity_inetaddr_notifier);
3685}
3686
3687static void velocity_unregister_notifier(void)
3688{
3689 unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3690}
3691
3692#else
3693
3694#define velocity_register_notifier() do {} while (0)
3695#define velocity_unregister_notifier() do {} while (0)
3696
3697#endif
3698
/**
 *	velocity_init_module	-	load time function
 *
 *	Called when the velocity module is loaded. Registers the PCI and
 *	platform drivers, which in turn probe each velocity adapter
 *	installed in the system. The module load fails only if both
 *	registrations fail.
 */
3707static int __init velocity_init_module(void)
3708{
3709 int ret_pci, ret_platform;
3710
3711 velocity_register_notifier();
3712
3713 ret_pci = pci_register_driver(&velocity_pci_driver);
3714 ret_platform = platform_driver_register(&velocity_platform_driver);
3715
	/* if both registrations failed, remove the notifier again */
3717 if ((ret_pci < 0) && (ret_platform < 0)) {
3718 velocity_unregister_notifier();
3719 return ret_pci;
3720 }
3721
3722 return 0;
3723}
3724
/**
 *	velocity_cleanup_module		-	module unload
 *
 *	Called when the velocity module is unloaded. Cleans up the
 *	notifier and unregisters the PCI and platform drivers.
 */
3733static void __exit velocity_cleanup_module(void)
3734{
3735 velocity_unregister_notifier();
3736
3737 pci_unregister_driver(&velocity_pci_driver);
3738 platform_driver_unregister(&velocity_platform_driver);
3739}
3740
3741module_init(velocity_init_module);
3742module_exit(velocity_cleanup_module);
3743