/*
**  System Bus Adapter (SBA) I/O MMU manager
**
**  Driver for the I/O MMU found in HP PA-RISC bus adapters
**  (Astro, Ike/REO and Pluto). It manages the IO pdir that
**  translates 32-bit PCI bus addresses into physical memory
**  addresses, one entry per IO page.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <asm/hardware.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/ropes.h>
#include <asm/mckinley.h>
#include <asm/runway.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/parisc-device.h>

#define MODULE_NAME "SBA"

#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#define SBA_INLINE	__inline__

#define DEFAULT_DMA_HINT_REG	0

struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);

/* set when the IO pdir is not cache coherent and needs fdc/sync flushes */
static unsigned long ioc_needs_fdc = 0;

/* number of IOMMUs (IOCs) in the whole system */
static unsigned int global_ioc_cnt = 0;

/* set when the Piranha 128k pdir workaround is active (see sba_alloc_pdir) */
static unsigned long piranha_bad_128k = 0;

#define SBA_DEV(d) ((struct sba_device *) (d))

#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif

#ifdef SBA_AGP_SUPPORT
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif

#define READ_REG32(addr)	readl(addr)
#define READ_REG64(addr)	readq(addr)
#define WRITE_REG32(val, addr)	writel((val), (addr))
#define WRITE_REG64(val, addr)	writeq((val), (addr))

#ifdef CONFIG_64BIT
#define READ_REG(addr)		READ_REG64(addr)
#define WRITE_REG(value, addr)	WRITE_REG64(value, addr)
#else
#define READ_REG(addr)		READ_REG32(addr)
#define WRITE_REG(value, addr)	WRITE_REG32(value, addr)
#endif
139
140#ifdef DEBUG_SBA_INIT
141
142
143
144
145
146
147
148
149
150
151static void
152sba_dump_ranges(void __iomem *hpa)
153{
154 DBG_INIT("SBA at 0x%p\n", hpa);
155 DBG_INIT("IOS_DIST_BASE : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
156 DBG_INIT("IOS_DIST_MASK : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
157 DBG_INIT("IOS_DIST_ROUTE : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
158 DBG_INIT("\n");
159 DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
160 DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
161 DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
162}
163
164
165
166
167
168
169
170static void sba_dump_tlb(void __iomem *hpa)
171{
172 DBG_INIT("IO TLB at 0x%p\n", hpa);
173 DBG_INIT("IOC_IBASE : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
174 DBG_INIT("IOC_IMASK : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
175 DBG_INIT("IOC_TCNFG : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
176 DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
177 DBG_INIT("\n");
178}
179#else
180#define sba_dump_ranges(x)
181#define sba_dump_tlb(x)
182#endif
183
184
185#ifdef ASSERT_PDIR_SANITY
186
187
188
189
190
191
192
193
194
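/**
 * sba_dump_pdir_entry - debug only - print one IO pdir entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU pdir (and the word of the resource map
 * that covers it) in human readable form.
 */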
195static void
196sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
197{
198
199 u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
200 unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
201 uint rcnt;
202
203 printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
204 msg,
205 rptr, pide & (BITS_PER_LONG - 1), *rptr);
206
207 rcnt = 0;
208 while (rcnt < BITS_PER_LONG) {
209 printk(KERN_DEBUG "%s %2d %p %016Lx\n",
210 (rcnt == (pide & (BITS_PER_LONG - 1)))
211 ? " -->" : " ",
212 rcnt, ptr, *ptr );
213 rcnt++;
214 ptr++;
215 }
216 printk(KERN_DEBUG "%s", msg);
217}
218
219
220
221
222
223
224
225
226
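/**
 * sba_check_pdir - debug only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify that the resource map and the pdir agree: every bit set in the
 * resource map should correspond to a pdir entry with its valid bit set.
 * Returns 1 (and dumps the offending entry) on mismatch, 0 otherwise.
 */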
227static int
228sba_check_pdir(struct ioc *ioc, char *msg)
229{
230 u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
231 u32 *rptr = (u32 *) ioc->res_map;
232 u64 *pptr = ioc->pdir_base;
233 uint pide = 0;
234
235 while (rptr < rptr_end) {
236 u32 rval = *rptr;
237 int rcnt = 32;
238
239 while (rcnt) {
240
241 u32 pde = ((u32) (((char *)pptr)[7])) << 24;
242 if ((rval ^ pde) & 0x80000000)
243 {
244
245
246
247
248 sba_dump_pdir_entry(ioc, msg, pide);
249 return(1);
250 }
251 rcnt--;
252 rval <<= 1;
253 pptr++;
254 pide++;
255 }
256 rptr++;
257 }
258
259 return 0;
260}
261
262
263
264
265
266
267
268
269
270
271static void
272sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
273{
274 while (nents-- > 0) {
275 printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
276 nents,
277 (unsigned long) sg_dma_address(startsg),
278 sg_dma_len(startsg),
279 sg_virt_addr(startsg), startsg->length);
280 startsg++;
281 }
282}
283
284#endif
285
286
287
288
289
290
291
292
293
294
295
296
297
#define PAGES_PER_RANGE 1

/*
** Convert between an IO virtual address (IOVA) handed to a device and
** the IO virtual page (IOVP) used to index the IO pdir. With ZX1_SUPPORT
** the IOVA also carries the IOC's base address (ibase); otherwise the
** IOVP plus the page offset make up the whole IOVA.
*/
#ifdef ZX1_SUPPORT
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif

#define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

/* one bit per IO pdir entry; RESMAP_MASK(n) is n set bits, left justified */
#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
316
317static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
318 unsigned int bitshiftcnt)
319{
320 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
321 + bitshiftcnt;
322}
323
324
325
326
327
328
329
330
331
332
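/**
 * sba_search_bitmap - find free space in the IO pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device the mapping is for (supplies the DMA segment boundary).
 * @bits_wanted: number of entries we need.
 *
 * Find consecutive free bits in the resource bitmap; each bit represents
 * one IO pdir entry. Requests larger than half a word are satisfied a
 * word at a time; smaller requests are aligned to their size rounded up
 * to a power of two, which keeps mappings naturally aligned for the IO
 * TLB purge in sba_mark_invalid(). Returns the pdir index, or ~0UL if
 * no suitable range was found.
 */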
333static SBA_INLINE unsigned long
334sba_search_bitmap(struct ioc *ioc, struct device *dev,
335 unsigned long bits_wanted)
336{
337 unsigned long *res_ptr = ioc->res_hint;
338 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
339 unsigned long pide = ~0UL, tpide;
340 unsigned long boundary_size;
341 unsigned long shift;
342 int ret;
343
344 boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
345 1ULL << IOVP_SHIFT) >> IOVP_SHIFT;
346
347#if defined(ZX1_SUPPORT)
348 BUG_ON(ioc->ibase & ~IOVP_MASK);
349 shift = ioc->ibase >> IOVP_SHIFT;
350#else
351 shift = 0;
352#endif
353
354 if (bits_wanted > (BITS_PER_LONG/2)) {
355
356 for(; res_ptr < res_end; ++res_ptr) {
357 tpide = ptr_to_pide(ioc, res_ptr, 0);
358 ret = iommu_is_span_boundary(tpide, bits_wanted,
359 shift,
360 boundary_size);
361 if ((*res_ptr == 0) && !ret) {
362 *res_ptr = RESMAP_MASK(bits_wanted);
363 pide = tpide;
364 break;
365 }
366 }
367
368 res_ptr++;
369 ioc->res_bitshift = 0;
370 } else {
371
372
373
374
375
376
377 unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
378 uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
379 unsigned long mask;
380
381 if (bitshiftcnt >= BITS_PER_LONG) {
382 bitshiftcnt = 0;
383 res_ptr++;
384 }
385 mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;
386
387 DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
388 while(res_ptr < res_end)
389 {
390 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
391 WARN_ON(mask == 0);
392 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
393 ret = iommu_is_span_boundary(tpide, bits_wanted,
394 shift,
395 boundary_size);
396 if ((((*res_ptr) & mask) == 0) && !ret) {
397 *res_ptr |= mask;
398 pide = tpide;
399 break;
400 }
401 mask >>= o;
402 bitshiftcnt += o;
403 if (mask == 0) {
404 mask = RESMAP_MASK(bits_wanted);
405 bitshiftcnt=0;
406 res_ptr++;
407 }
408 }
409
410 ioc->res_bitshift = bitshiftcnt + bits_wanted;
411 }
412
413
414 if (res_end <= res_ptr) {
415 ioc->res_hint = (unsigned long *) ioc->res_map;
416 ioc->res_bitshift = 0;
417 } else {
418 ioc->res_hint = res_ptr;
419 }
420 return (pide);
421}
422
423
424
425
426
427
428
429
430
431
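/**
 * sba_alloc_range - allocate a range of IO pdir entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device the mapping is for.
 * @size: number of bytes to create a mapping for.
 *
 * Marks the needed bits in the resource bitmap (retrying the search once)
 * and returns the first pdir index of the range. Panics if the IOMMU is
 * out of mapping resources.
 */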
432static int
433sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
434{
435 unsigned int pages_needed = size >> IOVP_SHIFT;
436#ifdef SBA_COLLECT_STATS
437 unsigned long cr_start = mfctl(16);
438#endif
439 unsigned long pide;
440
441 pide = sba_search_bitmap(ioc, dev, pages_needed);
442 if (pide >= (ioc->res_size << 3)) {
443 pide = sba_search_bitmap(ioc, dev, pages_needed);
444 if (pide >= (ioc->res_size << 3))
445 panic("%s: I/O MMU @ %p is out of mapping resources\n",
446 __FILE__, ioc->ioc_hpa);
447 }
448
449#ifdef ASSERT_PDIR_SANITY
450
451 if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
452 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
453 }
454#endif
455
456 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
457 __func__, size, pages_needed, pide,
458 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
459 ioc->res_bitshift );
460
461#ifdef SBA_COLLECT_STATS
462 {
463 unsigned long cr_end = mfctl(16);
464 unsigned long tmp = cr_end - cr_start;
465
466 cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
467 }
468 ioc->avg_search[ioc->avg_idx++] = cr_start;
469 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
470
471 ioc->used_pages += pages_needed;
472#endif
473
474 return (pide);
475}
476
477
478
479
480
481
482
483
484
485
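/**
 * sba_free_range - release a range of IO pdir entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address that was previously allocated.
 * @size: number of bytes the mapping covers.
 *
 * Clears the corresponding bits in the resource bitmap. The pdir entries
 * themselves are invalidated separately by sba_mark_invalid().
 */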
486static SBA_INLINE void
487sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
488{
489 unsigned long iovp = SBA_IOVP(ioc, iova);
490 unsigned int pide = PDIR_INDEX(iovp);
491 unsigned int ridx = pide >> 3;
492 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
493
494 int bits_not_wanted = size >> IOVP_SHIFT;
495
496
497 unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));
498
499 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
500 __func__, (uint) iova, size,
501 bits_not_wanted, m, pide, res_ptr, *res_ptr);
502
503#ifdef SBA_COLLECT_STATS
504 ioc->used_pages -= bits_not_wanted;
505#endif
506
507 *res_ptr &= ~m;
508}
509
510
511
512
513
514
515
516
517#ifdef SBA_HINT_SUPPORT
518#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
519#endif
520
521typedef unsigned long space_t;
522#define KERNEL_SPACE 0
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
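/**
 * sba_io_pdir_entry - fill in one IO pdir entry
 * @pdir_ptr: pointer to the IO pdir entry to fill.
 * @sid: space ID of the buffer (KERNEL_SPACE for kernel mappings).
 * @vba: CPU virtual address of the buffer to map.
 * @hint: DMA hint set to use for this mapping (unused here).
 *
 * Loads the entry with the buffer's physical address, the coherence
 * index obtained with the lci instruction, and the valid bit.
 */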
static void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		  unsigned long hint)
{
	u64 pa;			/* physical address */
	register unsigned ci;	/* coherence index */

	pa = virt_to_phys(vba);
	pa &= IOVP_MASK;

	mtsp(sid,1);
	asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> 12) & 0xff;	/* move CI (8 bits) into lowest byte */

	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into IO pdir */

	/*
	 * If PDC reported a non-coherent IO pdir (see ioc_needs_fdc being
	 * set from PDC_MODEL_IOPDIR_FDC in sba_common_init), every updated
	 * entry has to be flushed from the data cache.
	 */
	if (ioc_needs_fdc)
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
}
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
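/**
 * sba_mark_invalid - invalidate one or more IO pdir entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address that was mapped earlier.
 * @byte_cnt: number of bytes the mapping covers.
 *
 * Clears the valid byte of each IO pdir entry in the range, flushes the
 * entries if the pdir is not coherent, and purges the matching IO TLB
 * entries through the PCOM register (the purge size is encoded in the
 * low bits of the address written to PCOM).
 */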
607static SBA_INLINE void
608sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
609{
610 u32 iovp = (u32) SBA_IOVP(ioc,iova);
611 u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
612
613#ifdef ASSERT_PDIR_SANITY
614
615
616
617
618
619
620 if (0x80 != (((u8 *) pdir_ptr)[7])) {
621 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
622 }
623#endif
624
625 if (byte_cnt > IOVP_SIZE)
626 {
627#if 0
628 unsigned long entries_per_cacheline = ioc_needs_fdc ?
629 L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
630 - (unsigned long) pdir_ptr;
631 : 262144;
632#endif
633
634
635 iovp |= get_order(byte_cnt) + PAGE_SHIFT;
636
637 do {
638
639 ((u8 *) pdir_ptr)[7] = 0;
640 if (ioc_needs_fdc) {
641 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
642#if 0
643 entries_per_cacheline = L1_CACHE_SHIFT - 3;
644#endif
645 }
646 pdir_ptr++;
647 byte_cnt -= IOVP_SIZE;
648 } while (byte_cnt > IOVP_SIZE);
649 } else
650 iovp |= IOVP_SHIFT;
651
652
653
654
655
656
657
658
659 ((u8 *) pdir_ptr)[7] = 0;
660 if (ioc_needs_fdc)
661 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
662
663 WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
664}
665
666
667
668
669
670
671
672
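/**
 * sba_dma_supported - check whether a DMA mask can be satisfied
 * @dev: device asking (must not be NULL).
 * @mask: number of address bits the device can drive.
 *
 * Masks wider than 32 bits are refused since the IOMMU is never
 * bypassed; otherwise the mask must cover the IOC's IO virtual
 * address space.
 */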
673static int sba_dma_supported( struct device *dev, u64 mask)
674{
675 struct ioc *ioc;
676
677 if (dev == NULL) {
678 printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
679 BUG();
680 return(0);
681 }
682
683
684
685
686
687
688 if (mask > ~0U)
689 return 0;
690
691 ioc = GET_IOC(dev);
692
693
694
695
696
697 return((int)(mask >= (ioc->ibase - 1 +
698 (ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
699}
700
701
702
703
704
705
706
707
708
709
710
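/**
 * sba_map_single - map one buffer for DMA
 * @dev: device owned by the driver that is asking.
 * @addr: kernel virtual address of the buffer to map.
 * @size: number of bytes to map.
 * @direction: DMA direction (read/write/both).
 *
 * Allocates a range of IO pdir entries, fills one entry per IO page of
 * the buffer and returns the IO virtual address the device should use.
 */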
711static dma_addr_t
712sba_map_single(struct device *dev, void *addr, size_t size,
713 enum dma_data_direction direction)
714{
715 struct ioc *ioc;
716 unsigned long flags;
717 dma_addr_t iovp;
718 dma_addr_t offset;
719 u64 *pdir_start;
720 int pide;
721
722 ioc = GET_IOC(dev);
723
724
725 offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
726
727
728 size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
729
730 spin_lock_irqsave(&ioc->res_lock, flags);
731#ifdef ASSERT_PDIR_SANITY
732 sba_check_pdir(ioc,"Check before sba_map_single()");
733#endif
734
735#ifdef SBA_COLLECT_STATS
736 ioc->msingle_calls++;
737 ioc->msingle_pages += size >> IOVP_SHIFT;
738#endif
739 pide = sba_alloc_range(ioc, dev, size);
740 iovp = (dma_addr_t) pide << IOVP_SHIFT;
741
742 DBG_RUN("%s() 0x%p -> 0x%lx\n",
743 __func__, addr, (long) iovp | offset);
744
745 pdir_start = &(ioc->pdir_base[pide]);
746
747 while (size > 0) {
748 sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
749
750 DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
751 pdir_start,
752 (u8) (((u8 *) pdir_start)[7]),
753 (u8) (((u8 *) pdir_start)[6]),
754 (u8) (((u8 *) pdir_start)[5]),
755 (u8) (((u8 *) pdir_start)[4]),
756 (u8) (((u8 *) pdir_start)[3]),
757 (u8) (((u8 *) pdir_start)[2]),
758 (u8) (((u8 *) pdir_start)[1]),
759 (u8) (((u8 *) pdir_start)[0])
760 );
761
762 addr += IOVP_SIZE;
763 size -= IOVP_SIZE;
764 pdir_start++;
765 }
766
767
768 if (ioc_needs_fdc)
769 asm volatile("sync" : : );
770
771#ifdef ASSERT_PDIR_SANITY
772 sba_check_pdir(ioc,"Check after sba_map_single()");
773#endif
774 spin_unlock_irqrestore(&ioc->res_lock, flags);
775
776
777 return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
778}
779
780
781
782
783
784
785
786
787
788
789
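/**
 * sba_unmap_single - undo a sba_map_single() mapping
 * @dev: device owned by the driver that is asking.
 * @iova: IO virtual address returned by sba_map_single().
 * @size: number of bytes that were mapped.
 * @direction: DMA direction (read/write/both).
 *
 * Invalidates the pdir entries and purges the IO TLB, then frees the
 * resource bitmap range (immediately, or in batches when
 * DELAYED_RESOURCE_CNT is enabled).
 */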
790static void
791sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
792 enum dma_data_direction direction)
793{
794 struct ioc *ioc;
795#if DELAYED_RESOURCE_CNT > 0
796 struct sba_dma_pair *d;
797#endif
798 unsigned long flags;
799 dma_addr_t offset;
800
801 DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
802
803 ioc = GET_IOC(dev);
804 offset = iova & ~IOVP_MASK;
805 iova ^= offset;
806 size += offset;
807 size = ALIGN(size, IOVP_SIZE);
808
809 spin_lock_irqsave(&ioc->res_lock, flags);
810
811#ifdef SBA_COLLECT_STATS
812 ioc->usingle_calls++;
813 ioc->usingle_pages += size >> IOVP_SHIFT;
814#endif
815
816 sba_mark_invalid(ioc, iova, size);
817
818#if DELAYED_RESOURCE_CNT > 0
819
820
821
822 d = &(ioc->saved[ioc->saved_cnt]);
823 d->iova = iova;
824 d->size = size;
825 if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
826 int cnt = ioc->saved_cnt;
827 while (cnt--) {
828 sba_free_range(ioc, d->iova, d->size);
829 d--;
830 }
831 ioc->saved_cnt = 0;
832
833 READ_REG(ioc->ioc_hpa+IOC_PCOM);
834 }
835#else
836 sba_free_range(ioc, iova, size);
837
838
839 if (ioc_needs_fdc)
840 asm volatile("sync" : : );
841
842 READ_REG(ioc->ioc_hpa+IOC_PCOM);
843#endif
844
845 spin_unlock_irqrestore(&ioc->res_lock, flags);
846
847
848
849
850
851
852
853
854
855}
856
857
858
859
860
861
862
863
864
865
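/**
 * sba_alloc_consistent - allocate memory for consistent DMA
 * @hwdev: device owned by the driver that is asking.
 * @size: number of bytes to allocate.
 * @dma_handle: returned IO virtual address of the buffer.
 * @gfp: allocation flags.
 *
 * Allocates zeroed pages and maps them through sba_map_single().
 */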
866static void *sba_alloc_consistent(struct device *hwdev, size_t size,
867 dma_addr_t *dma_handle, gfp_t gfp)
868{
869 void *ret;
870
871 if (!hwdev) {
872
873 *dma_handle = 0;
874 return NULL;
875 }
876
877 ret = (void *) __get_free_pages(gfp, get_order(size));
878
879 if (ret) {
880 memset(ret, 0, size);
881 *dma_handle = sba_map_single(hwdev, ret, size, 0);
882 }
883
884 return ret;
885}
886
887
888
889
890
891
892
893
894
895
896
897static void
898sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
899 dma_addr_t dma_handle)
900{
901 sba_unmap_single(hwdev, dma_handle, size, 0);
902 free_pages((unsigned long) vaddr, get_order(size));
903}
904
905
906
907
908
909
910
911#define PIDE_FLAG 0x80000000UL
912
913#ifdef SBA_COLLECT_STATS
914#define IOMMU_MAP_STATS
915#endif
916#include "iommu-helpers.h"
917
918#ifdef DEBUG_LARGE_SG_ENTRIES
919int dump_run_sg = 0;
920#endif
921
922
923
924
925
926
927
928
929
930
931
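/**
 * sba_map_sg - map a scatter/gather list for DMA
 * @dev: device owned by the driver that is asking.
 * @sglist: array of buffer/length pairs.
 * @nents: number of entries in the list.
 * @direction: DMA direction (read/write/both).
 *
 * Coalesces adjacent chunks where possible, then fills in the IO pdir
 * entries for each coalesced chunk. Returns the number of DMA mappings
 * actually created (at most nents).
 */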
932static int
933sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
934 enum dma_data_direction direction)
935{
936 struct ioc *ioc;
937 int coalesced, filled = 0;
938 unsigned long flags;
939
940 DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
941
942 ioc = GET_IOC(dev);
943
944
945 if (nents == 1) {
946 sg_dma_address(sglist) = sba_map_single(dev,
947 (void *)sg_virt_addr(sglist),
948 sglist->length, direction);
949 sg_dma_len(sglist) = sglist->length;
950 return 1;
951 }
952
953 spin_lock_irqsave(&ioc->res_lock, flags);
954
955#ifdef ASSERT_PDIR_SANITY
956 if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
957 {
958 sba_dump_sg(ioc, sglist, nents);
959 panic("Check before sba_map_sg()");
960 }
961#endif
962
963#ifdef SBA_COLLECT_STATS
964 ioc->msg_calls++;
965#endif
966
967
968
969
970
971
972
973
974
975 coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
976
977
978
979
980
981
982
983
984
985 filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
986
987
988 if (ioc_needs_fdc)
989 asm volatile("sync" : : );
990
991#ifdef ASSERT_PDIR_SANITY
992 if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
993 {
994 sba_dump_sg(ioc, sglist, nents);
995 panic("Check after sba_map_sg()\n");
996 }
997#endif
998
999 spin_unlock_irqrestore(&ioc->res_lock, flags);
1000
1001 DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
1002
1003 return filled;
1004}
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
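/**
 * sba_unmap_sg - undo a sba_map_sg() mapping
 * @dev: device owned by the driver that is asking.
 * @sglist: array of buffer/length pairs that was mapped.
 * @nents: number of entries in the list.
 * @direction: DMA direction (read/write/both).
 *
 * Unmaps each coalesced chunk with sba_unmap_single().
 */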
1016static void
1017sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
1018 enum dma_data_direction direction)
1019{
1020 struct ioc *ioc;
1021#ifdef ASSERT_PDIR_SANITY
1022 unsigned long flags;
1023#endif
1024
1025 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1026 __func__, nents, sg_virt_addr(sglist), sglist->length);
1027
1028 ioc = GET_IOC(dev);
1029
1030#ifdef SBA_COLLECT_STATS
1031 ioc->usg_calls++;
1032#endif
1033
1034#ifdef ASSERT_PDIR_SANITY
1035 spin_lock_irqsave(&ioc->res_lock, flags);
1036 sba_check_pdir(ioc,"Check before sba_unmap_sg()");
1037 spin_unlock_irqrestore(&ioc->res_lock, flags);
1038#endif
1039
1040 while (sg_dma_len(sglist) && nents--) {
1041
1042 sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
1043#ifdef SBA_COLLECT_STATS
1044 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
1045 ioc->usingle_calls--;
1046#endif
1047 ++sglist;
1048 }
1049
1050 DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
1051
1052#ifdef ASSERT_PDIR_SANITY
1053 spin_lock_irqsave(&ioc->res_lock, flags);
1054 sba_check_pdir(ioc,"Check after sba_unmap_sg()");
1055 spin_unlock_irqrestore(&ioc->res_lock, flags);
1056#endif
1057
1058}
1059
1060static struct hppa_dma_ops sba_ops = {
1061 .dma_supported = sba_dma_supported,
1062 .alloc_consistent = sba_alloc_consistent,
1063 .alloc_noncoherent = sba_alloc_consistent,
1064 .free_consistent = sba_free_consistent,
1065 .map_single = sba_map_single,
1066 .unmap_single = sba_unmap_single,
1067 .map_sg = sba_map_sg,
1068 .unmap_sg = sba_unmap_sg,
1069 .dma_sync_single_for_cpu = NULL,
1070 .dma_sync_single_for_device = NULL,
1071 .dma_sync_sg_for_cpu = NULL,
1072 .dma_sync_sg_for_device = NULL,
1073};
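/*
 * Illustrative sketch only (not part of the original source): once
 * hppa_dma_ops points at sba_ops (see sba_driver_callback() below), an
 * ordinary driver reaches this IOMMU through the generic DMA API,
 * roughly like this; pdev, buf, len and handle are hypothetical names
 * used only for the example:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
 *					   DMA_TO_DEVICE);
 *	// program the device with 'handle', wait for the DMA to finish
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 *
 * dma_map_single() ends up in sba_map_single() above, and
 * dma_unmap_single() in sba_unmap_single().
 */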
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085static void
1086sba_get_pat_resources(struct sba_device *sba_dev)
1087{
1088#if 0
1089
1090
1091
1092
1093
1094
1095PAT_MOD(mod)->mod_info.mod_pages = PAT_GET_MOD_PAGES(temp);
1096 FIXME : ???
1097PAT_MOD(mod)->mod_info.dvi = PAT_GET_DVI(temp);
1098 Tells where the dvi bits are located in the address.
1099PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
1100 FIXME : ???
1101#endif
1102}
1103
1104
1105
1106
1107
1108
1109
1110#define PIRANHA_ADDR_MASK 0x00160000UL
1111#define PIRANHA_ADDR_VAL 0x00060000UL
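/*
** sba_alloc_pdir - allocate memory for the IO pdir.
**
** Besides the plain page allocation this works around an erratum on
** certain Piranha CPU revisions (see the cpuid/versions/capabilities
** test below): the pdir must not cover physical addresses matching
** PIRANHA_ADDR_MASK/PIRANHA_ADDR_VAL, so the allocation may be moved,
** grown, or have a 128k hole punched in it (piranha_bad_128k).
*/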
1112static void *
1113sba_alloc_pdir(unsigned int pdir_size)
1114{
1115 unsigned long pdir_base;
1116 unsigned long pdir_order = get_order(pdir_size);
1117
1118 pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
1119 if (NULL == (void *) pdir_base) {
1120 panic("%s() could not allocate I/O Page Table\n",
1121 __func__);
1122 }
1123
1124
1125
1126
1127
1128
1129
1130 if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
1131 || (boot_cpu_data.pdc.versions > 0x202)
1132 || (boot_cpu_data.pdc.capabilities & 0x08L) )
1133 return (void *) pdir_base;
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153 if (pdir_order <= (19-12)) {
1154 if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
1155
1156 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
1157
1158 free_pages(pdir_base, pdir_order);
1159
1160 pdir_base = new_pdir;
1161
1162
1163 while (pdir_order < (19-12)) {
1164 new_pdir += pdir_size;
1165 free_pages(new_pdir, pdir_order);
1166 pdir_order +=1;
1167 pdir_size <<=1;
1168 }
1169 }
1170 } else {
1171
1172
1173
1174
1175 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1);
1176
1177
1178 free_pages( pdir_base, pdir_order);
1179
1180
1181 free_pages(new_pdir, 20-12);
1182
1183 pdir_base = new_pdir + 1024*1024;
1184
1185 if (pdir_order > (20-12)) {
1186
1187
1188
1189
1190
1191
1192 piranha_bad_128k = 1;
1193
1194 new_pdir += 3*1024*1024;
1195
1196 free_pages(new_pdir, 20-12);
1197
1198
1199 free_pages(new_pdir - 128*1024 , 17-12);
1200
1201 pdir_size -= 128*1024;
1202 }
1203 }
1204
1205 memset((void *) pdir_base, 0, pdir_size);
1206 return (void *) pdir_base;
1207}
1208
1209struct ibase_data_struct {
1210 struct ioc *ioc;
1211 int ioc_num;
1212};
1213
1214static int setup_ibase_imask_callback(struct device *dev, void *data)
1215{
1216
1217 extern void lba_set_iregs(struct parisc_device *, u32, u32);
1218 struct parisc_device *lba = to_parisc_device(dev);
1219 struct ibase_data_struct *ibd = data;
1220 int rope_num = (lba->hpa.start >> 13) & 0xf;
1221 if (rope_num >> 3 == ibd->ioc_num)
1222 lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
1223 return 0;
1224}
1225
1226
1227static void
1228setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1229{
1230 struct ibase_data_struct ibase_data = {
1231 .ioc = ioc,
1232 .ioc_num = ioc_num,
1233 };
1234
1235 device_for_each_child(&sba->dev, &ibase_data,
1236 setup_ibase_imask_callback);
1237}
1238
1239#ifdef SBA_AGP_SUPPORT
1240static int
1241sba_ioc_find_quicksilver(struct device *dev, void *data)
1242{
1243 int *agp_found = data;
1244 struct parisc_device *lba = to_parisc_device(dev);
1245
1246 if (IS_QUICKSILVER(lba))
1247 *agp_found = 1;
1248 return 0;
1249}
1250#endif
1251
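/*
 * sba_ioc_init_pluto - IOC initialization for Pluto (McKinley) SBAs.
 *
 * Reads the firmware-programmed IBASE/IMASK to size the IOVA space,
 * allocates and installs the IO pdir, programs the page size (TCNFG),
 * enables translation and purges the IO TLB. When a QuickSilver AGP
 * rope is present, the top half of the pdir may be reserved for AGPGART.
 */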
1252static void
1253sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1254{
1255 u32 iova_space_mask;
1256 u32 iova_space_size;
1257 int iov_order, tcnfg;
1258#ifdef SBA_AGP_SUPPORT
1259 int agp_found = 0;
1260#endif
1261
1262
1263
1264
1265
1266 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
1267 iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
1268
1269 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
1270 printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
1271 iova_space_size /= 2;
1272 }
1273
1274
1275
1276
1277
1278 iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
1279 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
1280
1281 DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
1282 __func__, ioc->ioc_hpa, iova_space_size >> 20,
1283 iov_order + PAGE_SHIFT);
1284
1285 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1286 get_order(ioc->pdir_size));
1287 if (!ioc->pdir_base)
1288 panic("Couldn't allocate I/O Page Table\n");
1289
1290 memset(ioc->pdir_base, 0, ioc->pdir_size);
1291
1292 DBG_INIT("%s() pdir %p size %x\n",
1293 __func__, ioc->pdir_base, ioc->pdir_size);
1294
1295#ifdef SBA_HINT_SUPPORT
1296 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1297 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1298
1299 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
1300 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1301#endif
1302
1303 WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
1304 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1305
1306
1307 iova_space_mask = 0xffffffff;
1308 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1309 ioc->imask = iova_space_mask;
1310#ifdef ZX1_SUPPORT
1311 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1312#endif
1313 sba_dump_tlb(ioc->ioc_hpa);
1314
1315 setup_ibase_imask(sba, ioc, ioc_num);
1316
1317 WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
1318
1319#ifdef CONFIG_64BIT
1320
1321
1322
1323
1324 ioc->imask |= 0xFFFFFFFF00000000UL;
1325#endif
1326
1327
1328 switch (PAGE_SHIFT) {
1329 case 12: tcnfg = 0; break;
1330 case 13: tcnfg = 1; break;
1331 case 14: tcnfg = 2; break;
1332 case 16: tcnfg = 3; break;
1333 default:
1334 panic(__FILE__ "Unsupported system page size %d",
1335 1 << PAGE_SHIFT);
1336 break;
1337 }
1338 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1339
1340
1341
1342
1343
1344 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1345
1346
1347
1348
1349
1350 WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
1351
1352#ifdef SBA_AGP_SUPPORT
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362 device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);
1363
1364 if (agp_found && sba_reserve_agpgart) {
1365 printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
1366 __func__, (iova_space_size/2) >> 20);
1367 ioc->pdir_size /= 2;
1368 ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
1369 }
1370#endif
1371}
1372
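/*
 * sba_ioc_init - IOC initialization for Astro/Ike SBAs.
 *
 * Sizes the IOVA space from the amount of physical memory (clamped to
 * 1MB..1GB per IOC), allocates the IO pdir, programs PDIR_BASE, IBASE,
 * IMASK and TCNFG, enables translation and purges the whole IO TLB.
 */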
1373static void
1374sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1375{
1376 u32 iova_space_size, iova_space_mask;
1377 unsigned int pdir_size, iov_order;
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393 iova_space_size = (u32) (num_physpages/global_ioc_cnt);
1394
1395
1396 if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
1397 iova_space_size = 1 << (20 - PAGE_SHIFT);
1398 }
1399 else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
1400 iova_space_size = 1 << (30 - PAGE_SHIFT);
1401 }
1402
1403
1404
1405
1406
1407
1408 iov_order = get_order(iova_space_size << PAGE_SHIFT);
1409
1410
1411 iova_space_size = 1 << (iov_order + PAGE_SHIFT);
1412
1413 ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
1414
1415 DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
1416 __func__,
1417 ioc->ioc_hpa,
1418 (unsigned long) num_physpages >> (20 - PAGE_SHIFT),
1419 iova_space_size>>20,
1420 iov_order + PAGE_SHIFT);
1421
1422 ioc->pdir_base = sba_alloc_pdir(pdir_size);
1423
1424 DBG_INIT("%s() pdir %p size %x\n",
1425 __func__, ioc->pdir_base, pdir_size);
1426
1427#ifdef SBA_HINT_SUPPORT
1428
1429 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1430 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1431
1432 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
1433 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1434#endif
1435
1436 WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1437
1438
1439 iova_space_mask = 0xffffffff;
1440 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1441
1442
1443
1444
1445
1446 ioc->ibase = 0;
1447 ioc->imask = iova_space_mask;
1448#ifdef ZX1_SUPPORT
1449 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1450#endif
1451
1452 DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
1453 __func__, ioc->ibase, ioc->imask);
1454
1455
1456
1457
1458
1459
1460
1461 setup_ibase_imask(sba, ioc, ioc_num);
1462
1463
1464
1465
1466 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
1467 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
1468
1469
1470 WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG);
1471
1472
1473
1474
1475
1476 WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
1477
1478 ioc->ibase = 0;
1479
1480 DBG_INIT("%s() DONE\n", __func__);
1481}
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
1497{
1498 return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
1499}
1500
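/*
 * sba_hw_init - per-chip hardware initialization.
 *
 * Optionally resets firmware-owned devices, programs IOC_CTRL, maps the
 * per-IOC register sets (one IOC on Astro/Pluto, two on Ike), claims the
 * chip's reserved MMIO/IOVA address ranges, writes HF_ENABLE to each
 * rope's control register and then runs the per-IOC init routine.
 */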
1501static void sba_hw_init(struct sba_device *sba_dev)
1502{
1503 int i;
1504 int num_ioc;
1505 u64 ioc_ctl;
1506
1507 if (!is_pdc_pat()) {
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525 if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
1526 pdc_io_reset_devices();
1527 }
1528
1529 }
1530
1531
1532#if 0
1533printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
1534 PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545 if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
1546 && (PAGE0->mem_boot.cl_class != CL_SEQU)) {
1547 pdc_io_reset();
1548 }
1549#endif
1550
1551 if (!IS_PLUTO(sba_dev->dev)) {
1552 ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
1553 DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
1554 __func__, sba_dev->sba_hpa, ioc_ctl);
1555 ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
1556 ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
1557
1558
1559
1560 WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);
1561
1562#ifdef DEBUG_SBA_INIT
1563 ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
1564 DBG_INIT(" 0x%Lx\n", ioc_ctl);
1565#endif
1566 }
1567
1568 if (IS_ASTRO(sba_dev->dev)) {
1569 int err;
1570 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
1571 num_ioc = 1;
1572
1573 sba_dev->chip_resv.name = "Astro Intr Ack";
1574 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
1575 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ;
1576 err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
1577 BUG_ON(err < 0);
1578
1579 } else if (IS_PLUTO(sba_dev->dev)) {
1580 int err;
1581
1582 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
1583 num_ioc = 1;
1584
1585 sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
1586 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
1587 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
1588 err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
1589 WARN_ON(err < 0);
1590
1591 sba_dev->iommu_resv.name = "IOVA Space";
1592 sba_dev->iommu_resv.start = 0x40000000UL;
1593 sba_dev->iommu_resv.end = 0x50000000UL - 1;
1594 err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
1595 WARN_ON(err < 0);
1596 } else {
1597
1598 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
1599 sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
1600 num_ioc = 2;
1601
1602
1603 }
1604
1605
1606 sba_dev->num_ioc = num_ioc;
1607 for (i = 0; i < num_ioc; i++) {
1608 void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
1609 unsigned int j;
1610
1611 for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) {
1612
1613
1614
1615
1616
1617
1618
1619 if (IS_PLUTO(sba_dev->dev)) {
1620 void __iomem *rope_cfg;
1621 unsigned long cfg_val;
1622
1623 rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
1624 cfg_val = READ_REG(rope_cfg);
1625 cfg_val &= ~IOC_ROPE_AO;
1626 WRITE_REG(cfg_val, rope_cfg);
1627 }
1628
1629
1630
1631
1632 WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
1633 }
1634
1635
1636 READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
1637
1638 DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
1639 i,
1640 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
1641 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
1642 );
1643 DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
1644 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
1645 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
1646 );
1647
1648 if (IS_PLUTO(sba_dev->dev)) {
1649 sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
1650 } else {
1651 sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
1652 }
1653 }
1654}
1655
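/*
 * sba_common_init - initialization shared by all SBA variants.
 *
 * Links the device into sba_list, allocates and clears each IOC's
 * resource bitmap (marking the unusable 128k region busy on affected
 * Piranha systems), and records in ioc_needs_fdc whether IO pdir
 * updates require fdc/sync flushing.
 */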
1656static void
1657sba_common_init(struct sba_device *sba_dev)
1658{
1659 int i;
1660
1661
1662
1663
1664 sba_dev->next = sba_list;
1665 sba_list = sba_dev;
1666
1667 for(i=0; i< sba_dev->num_ioc; i++) {
1668 int res_size;
1669#ifdef DEBUG_DMB_TRAP
1670 extern void iterate_pages(unsigned long , unsigned long ,
1671 void (*)(pte_t * , unsigned long),
1672 unsigned long );
1673 void set_data_memory_break(pte_t * , unsigned long);
1674#endif
1675
1676 res_size = sba_dev->ioc[i].pdir_size/sizeof(u64);
1677
1678
1679 if (piranha_bad_128k) {
1680 res_size -= (128*1024)/sizeof(u64);
1681 }
1682
1683 res_size >>= 3;
1684 DBG_INIT("%s() res_size 0x%x\n",
1685 __func__, res_size);
1686
1687 sba_dev->ioc[i].res_size = res_size;
1688 sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
1689
1690#ifdef DEBUG_DMB_TRAP
1691 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1692 set_data_memory_break, 0);
1693#endif
1694
1695 if (NULL == sba_dev->ioc[i].res_map)
1696 {
1697 panic("%s:%s() could not allocate resource map\n",
1698 __FILE__, __func__ );
1699 }
1700
1701 memset(sba_dev->ioc[i].res_map, 0, res_size);
1702
1703 sba_dev->ioc[i].res_hint = (unsigned long *)
1704 &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);
1705
1706#ifdef ASSERT_PDIR_SANITY
1707
1708 sba_dev->ioc[i].res_map[0] = 0x80;
1709 sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
1710#endif
1711
1712
1713 if (piranha_bad_128k) {
1714
1715
1716 int idx_start = (1408*1024/sizeof(u64)) >> 3;
1717 int idx_end = (1536*1024/sizeof(u64)) >> 3;
1718 long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
1719 long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);
1720
1721
1722 while (p_start < p_end)
1723 *p_start++ = -1;
1724
1725 }
1726
1727#ifdef DEBUG_DMB_TRAP
1728 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1729 set_data_memory_break, 0);
1730 iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
1731 set_data_memory_break, 0);
1732#endif
1733
1734 DBG_INIT("%s() %d res_map %x %p\n",
1735 __func__, i, res_size, sba_dev->ioc[i].res_map);
1736 }
1737
1738 spin_lock_init(&sba_dev->sba_lock);
1739 ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;
1740
1741#ifdef DEBUG_SBA_INIT
1742
1743
1744
1745
1746
1747 if (ioc_needs_fdc) {
1748 printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
1749 } else {
1750 printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
1751 }
1752#endif
1753}
1754
1755#ifdef CONFIG_PROC_FS
1756static int sba_proc_info(struct seq_file *m, void *p)
1757{
1758 struct sba_device *sba_dev = sba_list;
1759 struct ioc *ioc = &sba_dev->ioc[0];
1760 int total_pages = (int) (ioc->res_size << 3);
1761#ifdef SBA_COLLECT_STATS
1762 unsigned long avg = 0, min, max;
1763#endif
1764 int i, len = 0;
1765
1766 len += seq_printf(m, "%s rev %d.%d\n",
1767 sba_dev->name,
1768 (sba_dev->hw_rev & 0x7) + 1,
1769 (sba_dev->hw_rev & 0x18) >> 3
1770 );
1771 len += seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
1772 (int) ((ioc->res_size << 3) * sizeof(u64)),
1773 total_pages);
1774
1775 len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
1776 ioc->res_size, ioc->res_size << 3);
1777
1778 len += seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
1779 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
1780 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
1781 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE)
1782 );
1783
1784 for (i=0; i<4; i++)
1785 len += seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n", i,
1786 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18),
1787 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18),
1788 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18)
1789 );
1790
1791#ifdef SBA_COLLECT_STATS
1792 len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
1793 total_pages - ioc->used_pages, ioc->used_pages,
1794 (int) (ioc->used_pages * 100 / total_pages));
1795
1796 min = max = ioc->avg_search[0];
1797 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1798 avg += ioc->avg_search[i];
1799 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1800 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1801 }
1802 avg /= SBA_SEARCH_SAMPLE;
1803 len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
1804 min, avg, max);
1805
1806 len += seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
1807 ioc->msingle_calls, ioc->msingle_pages,
1808 (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1809
1810
1811 min = ioc->usingle_calls;
1812 max = ioc->usingle_pages - ioc->usg_pages;
1813 len += seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
1814 min, max, (int) ((max * 1000)/min));
1815
1816 len += seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1817 ioc->msg_calls, ioc->msg_pages,
1818 (int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
1819
1820 len += seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1821 ioc->usg_calls, ioc->usg_pages,
1822 (int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
1823#endif
1824
1825 return 0;
1826}
1827
1828static int
1829sba_proc_open(struct inode *i, struct file *f)
1830{
1831 return single_open(f, &sba_proc_info, NULL);
1832}
1833
1834static const struct file_operations sba_proc_fops = {
1835 .owner = THIS_MODULE,
1836 .open = sba_proc_open,
1837 .read = seq_read,
1838 .llseek = seq_lseek,
1839 .release = single_release,
1840};
1841
1842static int
1843sba_proc_bitmap_info(struct seq_file *m, void *p)
1844{
1845 struct sba_device *sba_dev = sba_list;
1846 struct ioc *ioc = &sba_dev->ioc[0];
1847 unsigned int *res_ptr = (unsigned int *)ioc->res_map;
1848 int i, len = 0;
1849
1850 for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) {
1851 if ((i & 7) == 0)
1852 len += seq_printf(m, "\n ");
1853 len += seq_printf(m, " %08x", *res_ptr);
1854 }
1855 len += seq_printf(m, "\n");
1856
1857 return 0;
1858}
1859
1860static int
1861sba_proc_bitmap_open(struct inode *i, struct file *f)
1862{
1863 return single_open(f, &sba_proc_bitmap_info, NULL);
1864}
1865
1866static const struct file_operations sba_proc_bitmap_fops = {
1867 .owner = THIS_MODULE,
1868 .open = sba_proc_bitmap_open,
1869 .read = seq_read,
1870 .llseek = seq_lseek,
1871 .release = single_release,
1872};
1873#endif
1874
1875static struct parisc_device_id sba_tbl[] = {
1876 { HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
1877 { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
1878 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
1879 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
1880 { HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
1881 { 0, }
1882};
1883
1884static int sba_driver_callback(struct parisc_device *);
1885
1886static struct parisc_driver sba_driver = {
1887 .name = MODULE_NAME,
1888 .id_table = sba_tbl,
1889 .probe = sba_driver_callback,
1890};
1891
1892
1893
1894
1895
1896
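/**
 * sba_driver_callback - claim and initialize an SBA device
 * @dev: the parisc_device found during the bus walk.
 *
 * Identifies the chip variant and revision (Astro, Ike, REO or Pluto),
 * allocates the sba_device, performs hardware and common initialization,
 * installs sba_ops as the system's hppa_dma_ops and creates the
 * sba_iommu proc entries.
 */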
1897static int sba_driver_callback(struct parisc_device *dev)
1898{
1899 struct sba_device *sba_dev;
1900 u32 func_class;
1901 int i;
1902 char *version;
1903 void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
1904#ifdef CONFIG_PROC_FS
1905 struct proc_dir_entry *root;
1906#endif
1907
1908 sba_dump_ranges(sba_addr);
1909
1910
1911 func_class = READ_REG(sba_addr + SBA_FCLASS);
1912
1913 if (IS_ASTRO(dev)) {
1914 unsigned long fclass;
1915 static char astro_rev[]="Astro ?.?";
1916
1917
1918 fclass = READ_REG(sba_addr);
1919
1920 astro_rev[6] = '1' + (char) (fclass & 0x7);
1921 astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
1922 version = astro_rev;
1923
1924 } else if (IS_IKE(dev)) {
1925 static char ike_rev[] = "Ike rev ?";
1926 ike_rev[8] = '0' + (char) (func_class & 0xff);
1927 version = ike_rev;
1928 } else if (IS_PLUTO(dev)) {
1929 static char pluto_rev[]="Pluto ?.?";
1930 pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
1931 pluto_rev[8] = '0' + (char) (func_class & 0x0f);
1932 version = pluto_rev;
1933 } else {
1934 static char reo_rev[] = "REO rev ?";
1935 reo_rev[8] = '0' + (char) (func_class & 0xff);
1936 version = reo_rev;
1937 }
1938
1939 if (!global_ioc_cnt) {
1940 global_ioc_cnt = count_parisc_driver(&sba_driver);
1941
1942
1943 if ((!IS_ASTRO(dev)) || (!IS_PLUTO(dev)))
1944 global_ioc_cnt *= 2;
1945 }
1946
1947 printk(KERN_INFO "%s found %s at 0x%llx\n",
1948 MODULE_NAME, version, (unsigned long long)dev->hpa.start);
1949
1950 sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
1951 if (!sba_dev) {
1952 printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
1953 return -ENOMEM;
1954 }
1955
1956 parisc_set_drvdata(dev, sba_dev);
1957
1958 for(i=0; i<MAX_IOC; i++)
1959 spin_lock_init(&(sba_dev->ioc[i].res_lock));
1960
1961 sba_dev->dev = dev;
1962 sba_dev->hw_rev = func_class;
1963 sba_dev->name = dev->name;
1964 sba_dev->sba_hpa = sba_addr;
1965
1966 sba_get_pat_resources(sba_dev);
1967 sba_hw_init(sba_dev);
1968 sba_common_init(sba_dev);
1969
1970 hppa_dma_ops = &sba_ops;
1971
1972#ifdef CONFIG_PROC_FS
1973 switch (dev->id.hversion) {
1974 case PLUTO_MCKINLEY_PORT:
1975 root = proc_mckinley_root;
1976 break;
1977 case ASTRO_RUNWAY_PORT:
1978 case IKE_MERCED_PORT:
1979 default:
1980 root = proc_runway_root;
1981 break;
1982 }
1983
1984 proc_create("sba_iommu", 0, root, &sba_proc_fops);
1985 proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops);
1986#endif
1987
1988 parisc_has_iommu();
1989 return 0;
1990}
1991
1992
1993
1994
1995
1996
1997void __init sba_init(void)
1998{
1999 register_parisc_driver(&sba_driver);
2000}
2001
2002
2003
2004
2005
2006
2007
2008
2009
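/**
 * sba_get_iommu - return the IOMMU (ioc) serving a PCI host adapter
 * @pci_hba: the parisc device for the PCI host adapter (LBA).
 *
 * The SBA is the parent of the LBA; the hardware path of the LBA
 * selects which of the SBA's IOCs handles its DMA.
 */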
2010void * sba_get_iommu(struct parisc_device *pci_hba)
2011{
2012 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2013 struct sba_device *sba = sba_dev->dev.driver_data;
2014 char t = sba_dev->id.hw_type;
2015 int iocnum = (pci_hba->hw_path >> 3);
2016
2017 WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));
2018
2019 return &(sba->ioc[iocnum]);
2020}
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
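/**
 * sba_directed_lmmio - return a rope's directed LMMIO range
 * @pci_hba: the parisc device for the PCI host adapter.
 * @r: resource filled in with the range; left empty if none is routed
 *     to this rope.
 *
 * Scans the four directed LMMIO range registers and reports the one,
 * if any, routed to this host adapter's rope.
 */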
2031void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
2032{
2033 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2034 struct sba_device *sba = sba_dev->dev.driver_data;
2035 char t = sba_dev->id.hw_type;
2036 int i;
2037 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));
2038
2039 BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
2040
2041 r->start = r->end = 0;
2042
2043
2044 for (i=0; i<4; i++) {
2045 int base, size;
2046 void __iomem *reg = sba->sba_hpa + i*0x18;
2047
2048 base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
2049 if ((base & 1) == 0)
2050 continue;
2051
2052 size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);
2053
2054 if ((size & (ROPES_PER_IOC-1)) != rope)
2055 continue;
2056
2057 r->start = (base & ~1UL) | PCI_F_EXTEND;
2058 size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK);
2059 r->end = r->start + size;
2060 }
2061}
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
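/**
 * sba_distributed_lmmio - return a rope's share of the distributed LMMIO range
 * @pci_hba: the parisc device for the PCI host adapter.
 * @r: resource filled in with this rope's slice of the range.
 *
 * The distributed LMMIO range is divided evenly among the ropes of an
 * IOC; compute the slice that belongs to this host adapter's rope.
 */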
2073void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
2074{
2075 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2076 struct sba_device *sba = sba_dev->dev.driver_data;
2077 char t = sba_dev->id.hw_type;
2078 int base, size;
2079 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));
2080
2081 BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
2082
2083 r->start = r->end = 0;
2084
2085 base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
2086 if ((base & 1) == 0) {
2087 BUG();
2088 return;
2089 }
2090
2091 r->start = (base & ~1UL) | PCI_F_EXTEND;
2092
2093 size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
2094 r->start += rope * (size + 1);
2095 r->end = r->start + size;
2096}
2097