1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/delay.h>
34#include <linux/types.h>
35#include <linux/kernel.h>
36#include <linux/spinlock.h>
37#include <linux/init.h>
38#include <linux/pci.h>
39#include <linux/ioport.h>
40#include <linux/slab.h>
41
42#include <asm/byteorder.h>
43#include <asm/pdc.h>
44#include <asm/pdcpat.h>
45#include <asm/page.h>
46#include <asm/system.h>
47
48#include <asm/ropes.h>
49#include <asm/hardware.h>
50#include <asm/parisc-device.h>
51#include <asm/io.h>
52
53#undef DEBUG_LBA
54#undef DEBUG_LBA_PORT
55#undef DEBUG_LBA_CFG
56#undef DEBUG_LBA_PAT
57
58#undef FBB_SUPPORT
59
60
61#ifdef DEBUG_LBA
62#define DBG(x...) printk(x)
63#else
64#define DBG(x...)
65#endif
66
67#ifdef DEBUG_LBA_PORT
68#define DBG_PORT(x...) printk(x)
69#else
70#define DBG_PORT(x...)
71#endif
72
73#ifdef DEBUG_LBA_CFG
74#define DBG_CFG(x...) printk(x)
75#else
76#define DBG_CFG(x...)
77#endif
78
79#ifdef DEBUG_LBA_PAT
80#define DBG_PAT(x...) printk(x)
81#else
82#define DBG_PAT(x...)
83#endif
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100#define MODULE_NAME "LBA"
101
102
103#define LBA_PORT_BASE (PCI_F_EXTEND | 0xfee00000UL)
104static void __iomem *astro_iop_base __read_mostly;
105
106static u32 lba_t32;
107
108
109#define LBA_FLAG_SKIP_PROBE 0x10
110
111#define LBA_SKIP_PROBE(d) ((d)->flags & LBA_FLAG_SKIP_PROBE)
112
113
114
115#define LBA_DEV(d) ((struct lba_device *) (d))
116
117
118
119
120
121
122#define LBA_MAX_NUM_BUSES 8
123
124
125
126
127
128
129
130#define READ_U8(addr) __raw_readb(addr)
131#define READ_U16(addr) __raw_readw(addr)
132#define READ_U32(addr) __raw_readl(addr)
133#define WRITE_U8(value, addr) __raw_writeb(value, addr)
134#define WRITE_U16(value, addr) __raw_writew(value, addr)
135#define WRITE_U32(value, addr) __raw_writel(value, addr)
136
137#define READ_REG8(addr) readb(addr)
138#define READ_REG16(addr) readw(addr)
139#define READ_REG32(addr) readl(addr)
140#define READ_REG64(addr) readq(addr)
141#define WRITE_REG8(value, addr) writeb(value, addr)
142#define WRITE_REG16(value, addr) writew(value, addr)
143#define WRITE_REG32(value, addr) writel(value, addr)
144
145
146#define LBA_CFG_TOK(bus,dfn) ((u32) ((bus)<<16 | (dfn)<<8))
147#define LBA_CFG_BUS(tok) ((u8) ((tok)>>16))
148#define LBA_CFG_DEV(tok) ((u8) ((tok)>>11) & 0x1f)
149#define LBA_CFG_FUNC(tok) ((u8) ((tok)>>8 ) & 0x7)
150
151
152
153
154
155
156#define ROPES_PER_IOC 8
157#define LBA_NUM(x) ((((unsigned long) x) >> 13) & (ROPES_PER_IOC-1))
158
159
160static void
161lba_dump_res(struct resource *r, int d)
162{
163 int i;
164
165 if (NULL == r)
166 return;
167
168 printk(KERN_DEBUG "(%p)", r->parent);
169 for (i = d; i ; --i) printk(" ");
170 printk(KERN_DEBUG "%p [%lx,%lx]/%lx\n", r,
171 (long)r->start, (long)r->end, r->flags);
172 lba_dump_res(r->child, d+2);
173 lba_dump_res(r->sibling, d);
174}
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191static int lba_device_present(u8 bus, u8 dfn, struct lba_device *d)
192{
193 u8 first_bus = d->hba.hba_bus->secondary;
194 u8 last_sub_bus = d->hba.hba_bus->subordinate;
195
196 if ((bus < first_bus) ||
197 (bus > last_sub_bus) ||
198 ((bus - first_bus) >= LBA_MAX_NUM_BUSES)) {
199 return 0;
200 }
201
202 return 1;
203}
204
205
206
/*
** LBA_CFG_SETUP - prepare the LBA for a "careful" config-space access.
** Saves the registers that the probe sequence clobbers (restored later
** by LBA_CFG_RESTORE) and puts the chip into a state where a master
** abort is latched rather than escalated.  Expects u32 locals
** error_config, status_control and arb_mask to be in scope.
**
** FIX(review): the comment-stripped version had lost the trailing
** backslashes on its interior lines, truncating the macro body.
*/
#define LBA_CFG_SETUP(d, tok) {					\
	/* Save contents of error config register. */		\
	error_config = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);	\
	/* Save contents of status control register. */		\
	status_control = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);	\
	/* Save contents of the arbitration mask register. */	\
	arb_mask = READ_REG32(d->hba.base_addr + LBA_ARB_MASK);	\
	/* Restrict arbitration while the probe cycle runs. */	\
	WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK);	\
	/* Turn on smart mode so a master abort is logged	\
	** instead of treated as fatal. */			\
	WRITE_REG32(error_config | LBA_SMART_MODE, d->hba.base_addr + LBA_ERROR_CONFIG);	\
}
232
233
/*
** LBA_CFG_PROBE - generate a config cycle that detects whether a
** device is present at @tok.  A missing device will master abort,
** which LBA_CFG_MASTER_ABORT_CHECK then observes in the error status.
**
** FIX(review): restored the line continuations lost with the
** stripped comments (the macro body was truncated mid-definition).
*/
#define LBA_CFG_PROBE(d, tok) {					\
	/* Select the vendor-ID register of the target device. */	\
	WRITE_REG32(tok | PCI_VENDOR_ID, (d)->hba.base_addr + LBA_PCI_CFG_ADDR);\
	/* Read the address register back to flush the posted write. */	\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
	/* Generate the config cycle (~0 to the read-only vendor ID	\
	** is harmless to a present device). */				\
	WRITE_REG32(~0, (d)->hba.base_addr + LBA_PCI_CFG_DATA);		\
	/* Flush the data cycle before the abort check. */		\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
}
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
/* Error-status bits of interest in LBA_ERROR_STATUS. */
#define LBA_MASTER_ABORT_ERROR 0xc
#define LBA_FATAL_ERROR 0x10

/*
** LBA_CFG_MASTER_ABORT_CHECK - inspect the error log after a config
** cycle; sets @error to 1 if any error bit is latched, and clears the
** error log for non-fatal errors so later cycles start clean.
** Relies on the 'status_control' local saved by LBA_CFG_SETUP.
**
** FIX(review): restored the backslash continuations that the
** comment stripping removed (the macro no longer parsed).
*/
#define LBA_CFG_MASTER_ABORT_CHECK(d, base, tok, error) {	\
	u32 error_status = 0;					\
	/* Arm error-log clearing so latched state is visible. */	\
	WRITE_REG32(status_control | CLEAR_ERRLOG_ENABLE, base + LBA_STAT_CTL); \
	error_status = READ_REG32(base + LBA_ERROR_STATUS);	\
	if ((error_status & 0x1f) != 0) {			\
		/* LBA saw an error on this config access. */	\
		error = 1;					\
		if ((error_status & LBA_FATAL_ERROR) == 0) {	\
			/* Non-fatal (e.g. master abort):	\
			** clear the error log. */		\
			WRITE_REG32(status_control | CLEAR_ERRLOG, base + LBA_STAT_CTL); \
		}						\
	}							\
}
307
/* TR4.0+/Mercury path: just latch the dword-aligned config address. */
#define LBA_CFG_TR4_ADDR_SETUP(d, addr) \
	WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR);
310
/*
** LBA_CFG_ADDR_SETUP - latch the config address, then read it back.
** The read-back flushes the posted address write before the caller
** touches LBA_PCI_CFG_DATA (required on pre-TR4 Elroys, which use
** this path; TR4+/Mercury use LBA_CFG_TR4_ADDR_SETUP above).
**
** FIX(review): restored the continuation lost with the stripped
** comment between the two statements.
*/
#define LBA_CFG_ADDR_SETUP(d, addr) {				\
	WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
	/* Flush the posted address write. */			\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
}
319
320
/*
** LBA_CFG_RESTORE - undo LBA_CFG_SETUP by restoring the three saved
** registers (status control, error config, arbitration mask).
**
** FIX(review): restored the backslash continuations removed along
** with the interleaved comments.
*/
#define LBA_CFG_RESTORE(d, base) {				\
	/* Restore status control (drops CLEAR_ERRLOG_ENABLE). */	\
	WRITE_REG32(status_control, base + LBA_STAT_CTL);	\
	/* Restore error config (drops LBA_SMART_MODE). */	\
	WRITE_REG32(error_config, base + LBA_ERROR_CONFIG);	\
	/* Restore the arbitration mask. */			\
	WRITE_REG32(arb_mask, base + LBA_ARB_MASK);		\
}
335
336
337
/*
** lba_rd_cfg - "careful" config-space read for Elroy.
** Saves/adjusts LBA state, probes the slot for presence (master-abort
** check), performs the sized read only if no error was latched, then
** restores the chip.  Returns ~0U when the access master-aborted
** (i.e. no device present).  The three u32 locals below are referenced
** by name from the LBA_CFG_* macros — do not remove them.
*/
static unsigned int
lba_rd_cfg(struct lba_device *d, u32 tok, u8 reg, u32 size)
{
	u32 data = ~0U;
	int error = 0;
	u32 arb_mask = 0;	/* used by LBA_CFG_SETUP/RESTORE */
	u32 error_config = 0;	/* used by LBA_CFG_SETUP/RESTORE */
	u32 status_control = 0;	/* used by LBA_CFG_SETUP/RESTORE */

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_PROBE(d, tok);
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	if (!error) {
		void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

		LBA_CFG_ADDR_SETUP(d, tok | reg);
		/* sub-word accesses select byte lanes via reg & 3 / & 2 */
		switch (size) {
		case 1: data = (u32) READ_REG8(data_reg + (reg & 3)); break;
		case 2: data = (u32) READ_REG16(data_reg+ (reg & 2)); break;
		case 4: data = READ_REG32(data_reg); break;
		}
	}
	LBA_CFG_RESTORE(d, d->hba.base_addr);
	return(data);
}
363
364
/*
** elroy_cfg_read - pci_ops.read entry point for Elroy-based LBAs.
** Returns 0 on success with *data filled in (all-ones when the device
** is absent), or -EINVAL for out-of-range pos/devfn.
*/
static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	/*
	** The bare block below (instead of "if (!LBA_SKIP_PROBE(d))")
	** forces every read through the slow-but-safe lba_rd_cfg()
	** path and makes everything after it unreachable.  The dead
	** fast path is kept for reference.
	*/
	{
		/* original: if (!LBA_SKIP_PROBE(d)) */
		*data = lba_rd_cfg(d, tok, pos, size);
		DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __func__, tok, pos, *data);
		return 0;
	}

	/* --- dead code below: retained fast path --- */
	if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->secondary, devfn, d)) {
		DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __func__, tok, pos);
		/* device not present: config reads return all ones */
		*data = ~0U;
		return(0);
	}

	/* direct config cycle without the probe/restore dance */
	LBA_CFG_ADDR_SETUP(d, tok | pos);
	switch(size) {
	case 1: *data = READ_REG8 (data_reg + (pos & 3)); break;
	case 2: *data = READ_REG16(data_reg + (pos & 2)); break;
	case 4: *data = READ_REG32(data_reg); break;
	}
	DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __func__, tok, pos, *data);
	return 0;
}
404
405
/*
** lba_wr_cfg - "careful" config-space write for Elroy.
** Mirrors lba_rd_cfg(): save/adjust LBA state so a master abort on
** the write is latched rather than fatal, do the sized write, check
** for an abort, restore.  The 'error' result is computed but not
** acted upon here — a write to a missing device simply has no effect.
** The u32 locals are referenced by the LBA_CFG_* macros.
*/
static void
lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
{
	int error = 0;
	u32 arb_mask = 0;	/* used by LBA_CFG_SETUP/RESTORE */
	u32 error_config = 0;	/* used by LBA_CFG_SETUP/RESTORE */
	u32 status_control = 0;	/* used by LBA_CFG_SETUP/RESTORE */
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_ADDR_SETUP(d, tok | reg);
	switch (size) {
	case 1: WRITE_REG8 (data, data_reg + (reg & 3)); break;
	case 2: WRITE_REG16(data, data_reg + (reg & 2)); break;
	case 4: WRITE_REG32(data, data_reg); break;
	}
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	LBA_CFG_RESTORE(d, d->hba.base_addr);
}
425
426
427
428
429
430
431
/*
** elroy_cfg_write - pci_ops.write entry point for Elroy-based LBAs.
** Uses the careful lba_wr_cfg() path until LBA_FLAG_SKIP_PROBE is set
** (after the initial bus walk — see lba_driver_probe); afterwards it
** writes directly, skipping devices known to be absent.
*/
static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
	u32 tok = LBA_CFG_TOK(local_bus,devfn);

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	if (!LBA_SKIP_PROBE(d)) {
		/* probing still allowed: take the careful path */
		lba_wr_cfg(d, tok, pos, (u32) data, size);
		DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __func__, tok, pos,data);
		return 0;
	}

	if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->secondary, devfn, d))) {
		/* device can't be there: drop the write */
		DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __func__, tok, pos,data);
		return 1;
	}

	DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __func__, tok, pos, data);

	/* direct write, no probe/restore dance */
	LBA_CFG_ADDR_SETUP(d, tok | pos);
	switch(size) {
	case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3));
		break;
	case 2: WRITE_REG16(data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 2));
		break;
	case 4: WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA);
		break;
	}
	/* read the address register back — flushes the posted write */
	lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}
469
470
/* Config-space accessors used for Elroy-based LBAs. */
static struct pci_ops elroy_cfg_ops = {
	.read = elroy_cfg_read,
	.write = elroy_cfg_write,
};
475
476
477
478
479
480
481
482static int mercury_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
483{
484 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
485 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
486 u32 tok = LBA_CFG_TOK(local_bus, devfn);
487 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
488
489 if ((pos > 255) || (devfn > 255))
490 return -EINVAL;
491
492 LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
493 switch(size) {
494 case 1:
495 *data = READ_REG8(data_reg + (pos & 3));
496 break;
497 case 2:
498 *data = READ_REG16(data_reg + (pos & 2));
499 break;
500 case 4:
501 *data = READ_REG32(data_reg); break;
502 break;
503 }
504
505 DBG_CFG("mercury_cfg_read(%x+%2x) -> 0x%x\n", tok, pos, *data);
506 return 0;
507}
508
509
510
511
512
513
/*
** mercury_cfg_write - pci_ops.write for Mercury/Quicksilver LBAs.
** Direct write path (these chips handle master aborts gracefully).
** Returns 0 on success, -EINVAL for bad pos/devfn.
*/
static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
	u32 tok = LBA_CFG_TOK(local_bus,devfn);

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __func__, tok, pos, data);

	LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
	switch(size) {
	case 1:
		WRITE_REG8 (data, data_reg + (pos & 3));
		break;
	case 2:
		WRITE_REG16(data, data_reg + (pos & 2));
		break;
	case 4:
		WRITE_REG32(data, data_reg);
		break;
	}

	/* read back the config address — flushes the posted write */
	lba_t32 = READ_U32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}
543
/* Config-space accessors used for Mercury/Quicksilver LBAs. */
static struct pci_ops mercury_cfg_ops = {
	.read = mercury_cfg_read,
	.write = mercury_cfg_write,
};
548
549
550static void
551lba_bios_init(void)
552{
553 DBG(MODULE_NAME ": lba_bios_init\n");
554}
555
556
557#ifdef CONFIG_64BIT
558
559
560
561
562
563
564
565
566
567
568
569
570
571
/*
** truncate_pat_collision - trim @new so it does not overlap resources
** already registered under @root (assumes the child list is
** address-sorted, as kernel resource trees are).
**
** Returns 1 if @new is entirely covered by an existing entry (nothing
** left to register), 0 otherwise — possibly after shrinking
** new->start and/or new->end.
**
** NOTE(review): only the FIRST overlapping entry is examined; a range
** overlapping several entries is trimmed against just one of them.
*/
static unsigned long
truncate_pat_collision(struct resource *root, struct resource *new)
{
	unsigned long start = new->start;
	unsigned long end = new->end;
	struct resource *tmp = root->child;

	if (end <= start || start < root->start || !tmp)
		return 0;

	/* find the first entry that might overlap [start,end] */
	while (tmp && tmp->end < start)
		tmp = tmp->sibling;

	/* no entries reach our range */
	if (!tmp) return 0;

	/* next entry starts past our end: no overlap */
	if (tmp->start >= end) return 0;

	if (tmp->start <= start) {
		/* overlap at the front: move start past the entry */
		new->start = tmp->end + 1;

		if (tmp->end >= end) {
			/* entire range already covered */
			return 1;
		}
	}

	if (tmp->end < end ) {
		/* overlap at the back: pull end in before the entry */
		new->end = tmp->start - 1;
	}

	printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] "
		"to [%lx,%lx]\n",
		start, end,
		(long)new->start, (long)new->end );

	return 0;
}
616
617#else
618#define truncate_pat_collision(r,n) (0)
619#endif
620
621
622
623
624
625
626
627
628
629
/*
** lba_fixup_bus - pci_bios_ops.fixup_bus hook.
** For the root bus: registers this LBA's I/O-port, (e)lmmio and (on
** 64-bit) gmmio windows and attaches them as bus resources.  For
** subordinate buses: reads bridge bases and claims bridge resources.
** Then, for every device on the bus, translates its BARs from PCI
** view into processor view and wires up its IOSAPIC IRQ.
*/
static void
lba_fixup_bus(struct pci_bus *bus)
{
	struct list_head *ln;
#ifdef FBB_SUPPORT
	u16 status;
#endif
	struct lba_device *ldev = LBA_DEV(parisc_walk_tree(bus->bridge));
	int lba_portbase = HBA_PORT_BASE(ldev->hba.hba_num);

	DBG("lba_fixup_bus(0x%p) bus %d platform_data 0x%p\n",
		bus, bus->secondary, bus->bridge->platform_data);

	if (bus->parent) {
		/* bus behind a PCI-PCI bridge: inherit bridge windows */
		int i;

		pci_read_bridge_bases(bus);
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			pci_claim_resource(bus->self, i);
		}
	} else {
		/* root bus: publish this LBA's own address windows */
		int err, i;

		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.io_space.name,
			ldev->hba.io_space.start, ldev->hba.io_space.end,
			ldev->hba.io_space.flags);
		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.lmmio_space.name,
			ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end,
			ldev->hba.lmmio_space.flags);

		/* the I/O-port window must register cleanly */
		err = request_resource(&ioport_resource, &(ldev->hba.io_space));
		if (err < 0) {
			lba_dump_res(&ioport_resource, 2);
			BUG();
		}

		bus->resource[0] = &(ldev->hba.io_space);
		i = 1;	/* index of the next bus resource slot */

		if (ldev->hba.elmmio_space.start) {
			err = request_resource(&iomem_resource,
					&(ldev->hba.elmmio_space));
			if (err < 0) {
				/* non-fatal: continue without elmmio */
				printk("FAILED: lba_fixup_bus() request for "
					"elmmio_space [%lx/%lx]\n",
					(long)ldev->hba.elmmio_space.start,
					(long)ldev->hba.elmmio_space.end);
			} else
				bus->resource[i++] = &(ldev->hba.elmmio_space);
		}

		/*
		** The lmmio range may collide with ranges PAT firmware
		** already registered — trim it first (64-bit only; the
		** 32-bit stub always returns 0).
		*/
		if (truncate_pat_collision(&iomem_resource,
					&(ldev->hba.lmmio_space))) {
			/* fully covered: nothing left to register */
			printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
				(long)ldev->hba.lmmio_space.start,
				(long)ldev->hba.lmmio_space.end);
		} else {
			err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
			if (err < 0) {
				printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
					"lmmio_space [%lx/%lx]\n",
					(long)ldev->hba.lmmio_space.start,
					(long)ldev->hba.lmmio_space.end);
			} else
				bus->resource[i++] = &(ldev->hba.lmmio_space);
		}

#ifdef CONFIG_64BIT
		/* gmmio is only populated by the PAT path */
		if (ldev->hba.gmmio_space.flags) {
			err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space));
			if (err < 0) {
				printk("FAILED: lba_fixup_bus() request for "
					"gmmio_space [%lx/%lx]\n",
					(long)ldev->hba.gmmio_space.start,
					(long)ldev->hba.gmmio_space.end);
				lba_dump_res(&iomem_resource, 2);
				BUG();
			}
			bus->resource[i++] = &(ldev->hba.gmmio_space);
		}
#endif

	}

	list_for_each(ln, &bus->devices) {
		int i;
		struct pci_dev *dev = pci_dev_b(ln);

		DBG("lba_fixup_bus() %s\n", pci_name(dev));

		/* translate this device's BARs into processor view */
		for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
			struct resource *res = &dev->resource[i];

			/* skip unprogrammed BARs */
			if (!res->start)
				continue;

			if (res->flags & IORESOURCE_IO) {
				/* fold in the per-HBA port-space base */
				DBG("lba_fixup_bus() I/O Ports [%lx/%lx] -> ",
					res->start, res->end);
				res->start |= lba_portbase;
				res->end |= lba_portbase;
				DBG("[%lx/%lx]\n", res->start, res->end);
			} else if (res->flags & IORESOURCE_MEM) {
				/* PCI bus address -> host physical address */
				DBG("lba_fixup_bus() MMIO [%lx/%lx] -> ",
					res->start, res->end);
				res->start = PCI_HOST_ADDR(HBA_DATA(ldev), res->start);
				res->end = PCI_HOST_ADDR(HBA_DATA(ldev), res->end);
				DBG("[%lx/%lx]\n", res->start, res->end);
			} else {
				DBG("lba_fixup_bus() WTF? 0x%lx [%lx/%lx] XXX",
					res->flags, res->start, res->end);
			}

			pci_claim_resource(dev, i);
		}

#ifdef FBB_SUPPORT
		/*
		** If one device on the bus can't do fast back-to-back,
		** the whole bus can't (cleared below via bridge_ctl).
		*/
		(void) pci_read_config_word(dev, PCI_STATUS, &status);
		bus->bridge_ctl &= ~(status & PCI_STATUS_FAST_BACK);
#endif

		/* bridges get their IRQs fixed up on their own bus walk */
		if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
			continue;

		/* wire the device's interrupt line to the IOSAPIC */
		iosapic_fixup_irq(ldev->iosapic_obj, dev);
	}

#ifdef FBB_SUPPORT
	/* enable fast back-to-back if every device supported it */
	if (fbb_enable) {
		if (bus->parent) {
			u8 control;

			(void) pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &control);
			(void) pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, control | PCI_STATUS_FAST_BACK);

		} else {
			/* root bus: nothing to program here */
		}
		fbb_enable = PCI_COMMAND_FAST_BACK;
	}

	/* NOTE(review): 'dev' is not rebound inside this loop —
	** pre-existing issue in the FBB_SUPPORT-only code. */
	list_for_each(ln, &bus->devices) {
		(void) pci_read_config_word(dev, PCI_COMMAND, &status);
		status |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR | fbb_enable;
		(void) pci_write_config_word(dev, PCI_COMMAND, status);
	}
#endif
}
825
826
/* Hooks the generic parisc PCI BIOS layer into this driver. */
static struct pci_bios_ops lba_bios_ops = {
	.init = lba_bios_init,
	.fixup_bus = lba_fixup_bus,
};
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
/*
** Generate lba_astro_in{8,16,32}(): I/O-port reads on non-PAT (Astro)
** platforms, routed through the shared astro_iop_base mapping.
** The 'mask' macro argument is unused in this variant.
*/
#define LBA_PORT_IN(size, mask) \
static u##size lba_astro_in##size (struct pci_hba_data *d, u16 addr) \
{ \
	u##size t; \
	t = READ_REG##size(astro_iop_base + addr); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
/*
** Generate lba_astro_out{8,16,32}(): I/O-port writes via the shared
** Astro port space.  On pre-rev-3 hardware a harmless register read
** follows the write — presumably to flush the posted write; confirm
** against the Elroy ERS.
*/
#define LBA_PORT_OUT(size, mask) \
static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
{ \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, d, addr, val); \
	WRITE_REG##size(val, astro_iop_base + addr); \
	if (LBA_DEV(d)->hw_rev < 3) \
		lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)
902
903
/* I/O-port accessors used on non-PAT (Astro) platforms. */
static struct pci_port_ops lba_astro_port_ops = {
	.inb = lba_astro_in8,
	.inw = lba_astro_in16,
	.inl = lba_astro_in32,
	.outb = lba_astro_out8,
	.outw = lba_astro_out16,
	.outl = lba_astro_out32
};
912
913
914#ifdef CONFIG_64BIT
/*
** Translate an x86-style port number into this LBA's GMMIO port
** window: bits [15:2] select a 1KB-spaced page, bits [1:0] the byte
** lane (per the address arithmetic below).
*/
#define PIOP_TO_GMMIO(lba, addr) \
	((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3))
917
918
919
920
921
922
923
924
925
926
927
928
929
/*
** Generate lba_pat_in{8,16,32}(): I/O-port reads on PAT PDC
** platforms, routed through this LBA's own port space (iop_base)
** via PIOP_TO_GMMIO.
*/
#undef LBA_PORT_IN
#define LBA_PORT_IN(size, mask) \
static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \
{ \
	u##size t; \
	DBG_PORT("%s(0x%p, 0x%x) ->", __func__, l, addr); \
	t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)
944
945
/*
** Generate lba_pat_out{8,16,32}(): I/O-port writes on PAT PDC
** platforms.  The trailing LBA_FUNC_ID read — presumably to flush
** the posted write; confirm against the chip ERS.
*/
#undef LBA_PORT_OUT
#define LBA_PORT_OUT(size, mask) \
static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
{ \
	void __iomem *where = PIOP_TO_GMMIO(LBA_DEV(l), addr); \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, l, addr, val); \
	WRITE_REG##size(val, where); \
	/* harmless read-back after the write */ \
	lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)
960
961
/* I/O-port accessors used on PAT PDC platforms. */
static struct pci_port_ops lba_pat_port_ops = {
	.inb = lba_pat_in8,
	.inw = lba_pat_in16,
	.inl = lba_pat_in32,
	.outb = lba_pat_out8,
	.outw = lba_pat_out16,
	.outl = lba_pat_out32
};
970
971
972
973
974
975
976
977
978
/*
** lba_pat_resources - discover this LBA's address ranges via PAT PDC.
** @pa_dev: the parisc device for this LBA
** @lba_dev: the lba_device to populate
**
** Queries firmware (pdc_pat_cell_module) for both the processor (PA)
** and I/O views of each module range and fills in the bus_num, lmmio,
** elmmio, gmmio and io_space resources.  Panics if the firmware call
** fails or the module is not an LBA.
*/
static void
lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	unsigned long bytecnt;
	pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;	/* PA view */
	pdc_pat_cell_mod_maddr_block_t io_pdc_cell;	/* IO view */
	long io_count;
	long status;	/* PDC return status */
	long pa_count;
	int i;

	/* get the processor (PA) view of the module's ranges */
	status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				PA_VIEW, & pa_pdc_cell);
	pa_count = pa_pdc_cell.mod[1];

	/* ...and the I/O (PCI-side) view of the same ranges */
	status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				IO_VIEW, &io_pdc_cell);
	io_count = io_pdc_cell.mod[1];

	if (status != PDC_OK) {
		panic("pdc_pat_cell_module() call failed for LBA!\n");
	}

	if (PAT_GET_ENTITY(pa_pdc_cell.mod_info) != PAT_ENTITY_LBA) {
		panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n");
	}

	/*
	** Each range is a (type, start, end) triple starting at mod[2].
	*/
	for (i = 0; i < pa_count; i++) {
		struct {
			unsigned long type;
			unsigned long start;
			unsigned long end;
		} *p, *io;
		struct resource *r;

		p = (void *) &(pa_pdc_cell.mod[2+i*3]);
		io = (void *) &(io_pdc_cell.mod[2+i*3]);

		/* convert the PAT range type into a resource */
		switch(p->type & 0xff) {
		case PAT_PBNUM:
			/* PCI bus-number window for this LBA */
			lba_dev->hba.bus_num.start = p->start;
			lba_dev->hba.bus_num.end = p->end;
			break;

		case PAT_LMMIO:
			if (!lba_dev->hba.lmmio_space.start) {
				/* first LMMIO range */
				sprintf(lba_dev->hba.lmmio_name,
					"PCI%02x LMMIO",
					(int)lba_dev->hba.bus_num.start);
				/* offset between PA and PCI views */
				lba_dev->hba.lmmio_space_offset = p->start -
					io->start;
				r = &lba_dev->hba.lmmio_space;
				r->name = lba_dev->hba.lmmio_name;
			} else if (!lba_dev->hba.elmmio_space.start) {
				/* second LMMIO range becomes "elmmio" */
				sprintf(lba_dev->hba.elmmio_name,
					"PCI%02x ELMMIO",
					(int)lba_dev->hba.bus_num.start);
				r = &lba_dev->hba.elmmio_space;
				r->name = lba_dev->hba.elmmio_name;
			} else {
				printk(KERN_WARNING MODULE_NAME
					" only supports 2 LMMIO resources!\n");
				break;
			}

			r->start = p->start;
			r->end = p->end;
			r->flags = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_GMMIO:
			/* greater MMIO range — presumably for 64-bit
			** BARs above 4GB; confirm against PAT docs */
			sprintf(lba_dev->hba.gmmio_name, "PCI%02x GMMIO",
				(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.gmmio_space;
			r->name = lba_dev->hba.gmmio_name;
			r->start = p->start;
			r->end = p->end;
			r->flags = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_NPIOP:
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : ignoring NPIOP (0x%lx)\n",
				i, p->start);
			break;

		case PAT_PIOP:
			/*
			** Map the 64MB I/O-port window; port accesses
			** go through it via PIOP_TO_GMMIO().
			*/
			lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024);

			sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
				(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.io_space;
			r->name = lba_dev->hba.io_name;
			r->start = HBA_PORT_BASE(lba_dev->hba.hba_num);
			r->end = r->start + HBA_PORT_SPACE_SIZE - 1;
			r->flags = IORESOURCE_IO;
			r->parent = r->sibling = r->child = NULL;
			break;

		default:
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : unknown pat range type (0x%lx)\n",
				i, p->type & 0xff);
			break;
		}
	}
}
1100#else
1101
1102#define lba_pat_port_ops lba_astro_port_ops
1103#define lba_pat_resources(pa_dev, lba_dev)
1104#endif
1105
1106
1107extern void sba_distributed_lmmio(struct parisc_device *, struct resource *);
1108extern void sba_directed_lmmio(struct parisc_device *, struct resource *);
1109
1110
/*
** lba_legacy_resources - discover address ranges on non-PAT platforms.
** @pa_dev: the parisc device for this LBA
** @lba_dev: the lba_device to populate
**
** Bus numbers are read from LBA_FW_SCRATCH (low byte = first bus,
** next byte = last bus — presumably stashed there by firmware);
** the LMMIO/ELMMIO ranges come from the SBA driver; the I/O-port
** range is read from the LBA's own IOS_BASE/IOS_MASK registers.
*/
static void
lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	struct resource *r;
	int lba_num;

	/* fixed PA/PCI offset for legacy platforms */
	lba_dev->hba.lmmio_space_offset = PCI_F_EXTEND;

	/* bus-number window from the firmware scratch register */
	lba_num = READ_REG32(lba_dev->hba.base_addr + LBA_FW_SCRATCH);
	r = &(lba_dev->hba.bus_num);
	r->name = "LBA PCI Busses";
	r->start = lba_num & 0xff;
	r->end = (lba_num>>8) & 0xff;

	/* distributed LMMIO range */
	r = &(lba_dev->hba.lmmio_space);
	sprintf(lba_dev->hba.lmmio_name, "PCI%02x LMMIO",
		(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.lmmio_name;

#if 1
	/* ask the SBA for this rope's share of distributed LMMIO */
	sba_distributed_lmmio(pa_dev, r);
#else
	/*
	** Disabled alternative: carve the share out of LMMIO_BASE/MASK
	** by hand.  NOTE(review): references an undeclared 'mmio_mask'
	** — dead code under the '#if 1' above.
	*/
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_BASE);
	if (r->start & 1) {
		unsigned long rsize;

		r->flags = IORESOURCE_MEM;
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
		rsize = ~ READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK);

		/* each rope gets an equal share of the window */
		rsize /= ROPES_PER_IOC;
		r->start += (rsize + 1) * LBA_NUM(pa_dev->hpa.start);
		r->end = r->start + rsize;
	} else {
		/* range not enabled */
		r->end = r->start = 0;
	}
#endif

	/* directed ("elmmio") range */
	r = &(lba_dev->hba.elmmio_space);
	sprintf(lba_dev->hba.elmmio_name, "PCI%02x ELMMIO",
		(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.elmmio_name;

#if 1
	/* ask the SBA for the directed range */
	sba_directed_lmmio(pa_dev, r);
#else
	/* disabled alternative — same mmio_mask caveat as above */
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_BASE);

	if (r->start & 1) {
		unsigned long rsize;
		r->flags = IORESOURCE_MEM;
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
		rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK);
		r->end = r->start + ~rsize;
	}
#endif

	/* I/O-port range straight from IOS_BASE/IOS_MASK */
	r = &(lba_dev->hba.io_space);
	sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
		(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.io_name;
	r->flags = IORESOURCE_IO;
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L;
	r->end = r->start + (READ_REG32(lba_dev->hba.base_addr + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1));

	/* fold in the per-HBA port-space base */
	lba_num = HBA_PORT_BASE(lba_dev->hba.hba_num);
	r->start |= lba_num;
	r->end |= lba_num;
}
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
/*
** lba_hw_init - sanity-check and initialize LBA hardware state.
** @d: the LBA being brought up
**
** Checks for a still-asserted PCI bus reset (and delays after release),
** clears SMART mode if firmware left it on, sets the HF_ENABLE bit in
** status/control, and enables PCI arbitration if the mask is empty.
** Always returns 0.
*/
static int __init
lba_hw_init(struct lba_device *d)
{
	u32 stat;
	u32 bus_reset;	/* PCI bus reset still asserted? */

#if 0
	/* disabled register dump, kept for debugging */
	printk(KERN_DEBUG "LBA %lx STAT_CTL %Lx ERROR_CFG %Lx STATUS %Lx DMA_CTL %Lx\n",
		d->hba.base_addr,
		READ_REG64(d->hba.base_addr + LBA_STAT_CTL),
		READ_REG64(d->hba.base_addr + LBA_ERROR_CONFIG),
		READ_REG64(d->hba.base_addr + LBA_ERROR_STATUS),
		READ_REG64(d->hba.base_addr + LBA_DMA_CTL) );
	printk(KERN_DEBUG " ARB mask %Lx pri %Lx mode %Lx mtlt %Lx\n",
		READ_REG64(d->hba.base_addr + LBA_ARB_MASK),
		READ_REG64(d->hba.base_addr + LBA_ARB_PRI),
		READ_REG64(d->hba.base_addr + LBA_ARB_MODE),
		READ_REG64(d->hba.base_addr + LBA_ARB_MTLT) );
	printk(KERN_DEBUG " HINT cfg 0x%Lx\n",
		READ_REG64(d->hba.base_addr + LBA_HINT_CFG));
	printk(KERN_DEBUG " HINT reg ");
	{ int i;
	for (i=LBA_HINT_BASE; i< (14*8 + LBA_HINT_BASE); i+=8)
		printk(" %Lx", READ_REG64(d->hba.base_addr + i));
	}
	printk("\n");
#endif

#ifdef CONFIG_64BIT
	/* (no 64-bit-specific setup currently) */
#endif

	/* bus reset asserted? — bit 0 of the word at STAT_CTL+4 */
	bus_reset = READ_REG32(d->hba.base_addr + LBA_STAT_CTL + 4) & 1;
	if (bus_reset) {
		printk(KERN_DEBUG "NOTICE: PCI bus reset still asserted! (clearing)\n");
	}

	stat = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);
	if (stat & LBA_SMART_MODE) {
		/* clear smart mode left over from probing/firmware */
		printk(KERN_DEBUG "NOTICE: LBA in SMART mode! (cleared)\n");
		stat &= ~LBA_SMART_MODE;
		WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
	}

	/* set the HF (hard-fail) enable bit in status/control */
	stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
	WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);

	/*
	** Let the bus settle after reset release before the first
	** config cycle.  pci_post_reset_delay is defined elsewhere
	** in this file.
	*/
	if (bus_reset)
		mdelay(pci_post_reset_delay);

	if (0 == READ_REG32(d->hba.base_addr + LBA_ARB_MASK)) {
		/*
		** Arbitration mask of zero means no device can get the
		** bus; enable arbitration for the low two mask bits.
		*/
		printk(KERN_DEBUG "NOTICE: Enabling PCI Arbitration\n");
		WRITE_REG32(0x3, d->hba.base_addr + LBA_ARB_MASK);
	}

	return 0;
}
1375
1376
1377
1378
1379
1380
1381
1382
1383static unsigned int lba_next_bus = 0;
1384
1385
1386
1387
1388
1389
/*
** lba_driver_probe - probe/initialize one LBA (PCI host bridge).
** @dev: the matched parisc device
**
** Identifies the chip (Elroy vs Mercury/Quicksilver) from its function
** class register, picks the matching config-space accessors, registers
** the IOSAPIC, allocates the lba_device, discovers address windows
** (PAT PDC or legacy path), then scans and resources the PCI bus.
** Returns 0 on success, -ENODEV for an unknown chip, 1 on failure.
**
** NOTE(review): the ioremap'd 'addr' and the iosapic registration are
** not released on the error paths — pre-existing, left unchanged.
*/
static int __init
lba_driver_probe(struct parisc_device *dev)
{
	struct lba_device *lba_dev;
	struct pci_bus *lba_bus;
	struct pci_ops *cfg_ops;
	u32 func_class;
	void *tmp_obj;
	char *version;
	void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096);

	/* read the hardware revision first */
	func_class = READ_REG32(addr + LBA_FCLASS);

	if (IS_ELROY(dev)) {
		/* low nibble encodes the Elroy TR revision */
		func_class &= 0xf;
		switch (func_class) {
		case 0: version = "TR1.0"; break;
		case 1: version = "TR2.0"; break;
		case 2: version = "TR2.1"; break;
		case 3: version = "TR2.2"; break;
		case 4: version = "TR3.0"; break;
		case 5: version = "TR4.0"; break;
		default: version = "TR4+";
		}

		printk(KERN_INFO "Elroy version %s (0x%x) found at 0x%lx\n",
			version, func_class & 0xf, (long)dev->hpa.start);

		if (func_class < 2) {
			printk(KERN_WARNING "Can't support LBA older than "
				"TR2.1 - continuing under adversity.\n");
		}

#if 0
		/* disabled: TR4+ Elroy via the Mercury fast path */
		if (func_class > 4) {
			cfg_ops = &mercury_cfg_ops;
		} else
#endif
		{
			cfg_ops = &elroy_cfg_ops;
		}

	} else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
		int major, minor;

		func_class &= 0xff;
		major = func_class >> 4, minor = func_class & 0xf;

		printk(KERN_INFO "%s version TR%d.%d (0x%x) found at 0x%lx\n",
			IS_MERCURY(dev) ? "Mercury" : "Quicksilver", major,
			minor, func_class, (long)dev->hpa.start);

		cfg_ops = &mercury_cfg_ops;
	} else {
		printk(KERN_ERR "Unknown LBA found at 0x%lx\n",
			(long)dev->hpa.start);
		return -ENODEV;
	}

	/* tell the I/O SAPIC driver we have an IRQ region */
	tmp_obj = iosapic_register(dev->hpa.start + LBA_IOSAPIC_BASE);

	lba_dev = kzalloc(sizeof(struct lba_device), GFP_KERNEL);
	if (!lba_dev) {
		printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n");
		return(1);
	}

	/* ---- first: stash the data we already have ---- */

	lba_dev->hw_rev = func_class;
	lba_dev->hba.base_addr = addr;
	lba_dev->hba.dev = dev;
	lba_dev->iosapic_obj = tmp_obj;	/* IRQ handler info */
	lba_dev->hba.iommu = sba_get_iommu(dev);
	parisc_set_drvdata(dev, lba_dev);

	/* ---- second: common registration ---- */
	pci_bios = &lba_bios_ops;
	pcibios_register_hba(HBA_DATA(lba_dev));
	spin_lock_init(&lba_dev->lba_lock);

	if (lba_hw_init(lba_dev))
		return(1);

	/* ---- third: discover I/O-port and MMIO windows ---- */

	if (is_pdc_pat()) {
		/* PAT PDC: per-LBA port space, firmware-provided ranges */
		pci_port = &lba_pat_port_ops;

		lba_pat_resources(dev, lba_dev);
	} else {
		if (!astro_iop_base) {
			/* legacy: one shared Astro port-space mapping */
			astro_iop_base = ioremap_nocache(LBA_PORT_BASE, 64 * 1024);
			pci_port = &lba_astro_port_ops;
		}

		lba_legacy_resources(dev, lba_dev);
	}

	/* keep bus numbers monotonically increasing across LBAs */
	if (lba_dev->hba.bus_num.start < lba_next_bus)
		lba_dev->hba.bus_num.start = lba_next_bus;

	dev->dev.platform_data = lba_dev;
	lba_bus = lba_dev->hba.hba_bus =
		pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start,
			cfg_ops, NULL);
	if (lba_bus) {
		lba_next_bus = lba_bus->subordinate + 1;
		pci_bus_add_devices(lba_bus);
	}

	/* on PAT systems, size and assign any unassigned resources */
	if (is_pdc_pat()) {

		DBG_PAT("LBA pci_bus_size_bridges()\n");
		pci_bus_size_bridges(lba_bus);

		DBG_PAT("LBA pci_bus_assign_resources()\n");
		pci_bus_assign_resources(lba_bus);

#ifdef DEBUG_LBA_PAT
		DBG_PAT("\nLBA PIOP resource tree\n");
		lba_dump_res(&lba_dev->hba.io_space, 2);
		DBG_PAT("\nLBA LMMIO resource tree\n");
		lba_dump_res(&lba_dev->hba.lmmio_space, 2);
#endif
	}
	pci_enable_bridges(lba_bus);

	/*
	** After the bus walk, stop probing on Elroy: further config
	** cycles go direct and absent devices are skipped (see
	** elroy_cfg_read/write).
	*/
	if (cfg_ops == &elroy_cfg_ops) {
		lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
	}

	return 0;
}
1549
/* Match table: Elroy, Mercury and Quicksilver bridge HVERSIONs. */
static struct parisc_device_id lba_tbl[] = {
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, ELROY_HVERS, 0xa },
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, MERCURY_HVERS, 0xa },
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, QUICKSILVER_HVERS, 0xa },
	{ 0, }
};
1556
/* Driver registration object handed to register_parisc_driver(). */
static struct parisc_driver lba_driver = {
	.name = MODULE_NAME,
	.id_table = lba_tbl,
	.probe = lba_driver_probe,
};
1562
1563
1564
1565
1566
/*
** lba_init - register this driver with the parisc device framework
** so lba_driver_probe() runs for every matching bridge found.
*/
void __init lba_init(void)
{
	register_parisc_driver(&lba_driver);
}
1571
1572
1573
1574
1575
1576
/*
** lba_set_iregs - program the IBASE/IMASK registers of @lba.
** @lba: the LBA whose registers to program
** @ibase: IBASE value (low 21 bits must be zero)
** @imask: IMASK value, shifted left by 2 before use
**
** Temporarily maps the device registers, writes IMASK then IBASE,
** and unmaps.  Non-static: called from outside this file
** (presumably the SBA driver — verify against the caller).
*/
void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
{
	void __iomem * base_addr = ioremap_nocache(lba->hpa.start, 4096);

	imask <<= 2;	/* NOTE(review): shift applied before the
			** alignment check below — confirm intent */

	/* both values must be 2MB-aligned */
	WARN_ON((ibase & 0x001fffff) != 0);
	WARN_ON((imask & 0x001fffff) != 0);

	DBG("%s() ibase 0x%x imask 0x%x\n", __func__, ibase, imask);
	WRITE_REG32( imask, base_addr + LBA_IMASK);
	WRITE_REG32( ibase, base_addr + LBA_IBASE);
	iounmap(base_addr);
}
1592
1593