1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/kernel.h>
16#include <linux/pci.h>
17#include <linux/string.h>
18#include <linux/init.h>
19#include <linux/delay.h>
20#include <linux/export.h>
21#include <linux/of_address.h>
22#include <linux/of_pci.h>
23#include <linux/mm.h>
24#include <linux/shmem_fs.h>
25#include <linux/list.h>
26#include <linux/syscalls.h>
27#include <linux/irq.h>
28#include <linux/vmalloc.h>
29#include <linux/slab.h>
30#include <linux/vgaarb.h>
31#include <linux/numa.h>
32
33#include <asm/processor.h>
34#include <asm/io.h>
35#include <asm/prom.h>
36#include <asm/pci-bridge.h>
37#include <asm/byteorder.h>
38#include <asm/machdep.h>
39#include <asm/ppc-pci.h>
40#include <asm/eeh.h>
41
42#include "../../../drivers/pci/pci.h"
43
44
/* Protects hose_list and phb_bitmap below */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* For dynamic PHB numbering: max number of PHBs (i.e. PCI domains) */
#define MAX_PHBS 0x10000

/*
 * For dynamic PHB numbering: used/free PHB numbers, one bit per
 * possible domain.  Protected by hose_spinlock.
 */
static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);

/* CPU physical address of the start of ISA memory space, if present */
resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);

/* Default DMA ops installed on PCI devices in pcibios_bus_add_device() */
static const struct dma_map_ops *pci_dma_ops;
63
/* Set the DMA ops that pcibios_bus_add_device() will install on devices */
void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}
68
69
70
71
72
/*
 * Return a unique PHB (domain) number for a device node.
 *
 * Preference order: the "ibm,opal-phbid" property (PowerNV), then the
 * second cell of "reg".  The candidate is truncated to MAX_PHBS and
 * claimed in phb_bitmap; if it is already taken, or nothing usable was
 * found in the device tree, fall back to the first free bit.
 *
 * Caller must hold hose_spinlock (it protects phb_bitmap).
 */
static int get_phb_number(struct device_node *dn)
{
	int ret, phb_id = -1;
	u32 prop_32;
	u64 prop;

	/* Try OPAL's PHB id first, then fall back to the "reg" property */
	ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
	if (ret) {
		ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
		prop = prop_32;
	}

	if (!ret)
		phb_id = (int)(prop & (MAX_PHBS - 1));

	/* We need to be sure to not use the same PHB number twice. */
	if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
		return phb_id;

	/* If everything fails then fall back to dynamic PHB numbering. */
	phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
	BUG_ON(phb_id >= MAX_PHBS);
	set_bit(phb_id, phb_bitmap);

	return phb_id;
}
107
/*
 * Allocate a new pci_controller (PHB) for the given device node, assign
 * it a unique domain number and add it to the global hose_list.
 * Returns NULL on allocation failure.
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	/* hose_spinlock covers both the PHB number bitmap and hose_list */
	spin_lock(&hose_spinlock);
	phb->global_number = get_phb_number(dev);
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	/* Allocations made before the slab is up come from bootmem and
	 * must never be kfree()d — see pcibios_free_controller(). */
	phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		if (nid < 0 || !node_online(nid))
			nid = NUMA_NO_NODE;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
EXPORT_SYMBOL_GPL(pcibios_alloc_controller);
134
/* Remove a PHB from hose_list, release its domain number and free it. */
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);

	/* Clear bit of phb_bitmap to allow reuse of this PHB number. */
	if (phb->global_number < MAX_PHBS)
		clear_bit(phb->global_number, phb_bitmap);

	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	/* Bootmem-era allocations (is_dynamic == 0) are never freed */
	if (phb->is_dynamic)
		kfree(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller);
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
/*
 * Deferred variant, used as a pci_host_bridge release callback: frees
 * the pci_controller stashed in bridge->release_data once the PCI core
 * has dropped its last reference on the host bridge.
 */
void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
{
	struct pci_controller *phb = (struct pci_controller *)
				 bridge->release_data;

	pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);

	pcibios_free_controller(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
186
187
188
189
190
191
192
193resource_size_t pcibios_window_alignment(struct pci_bus *bus,
194 unsigned long type)
195{
196 struct pci_controller *phb = pci_bus_to_host(bus);
197
198 if (phb->controller_ops.window_alignment)
199 return phb->controller_ops.window_alignment(bus, type);
200
201
202
203
204
205
206 return 1;
207}
208
209void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
210{
211 struct pci_controller *hose = pci_bus_to_host(bus);
212
213 if (hose->controller_ops.setup_bridge)
214 hose->controller_ops.setup_bridge(bus, type);
215}
216
217void pcibios_reset_secondary_bus(struct pci_dev *dev)
218{
219 struct pci_controller *phb = pci_bus_to_host(dev->bus);
220
221 if (phb->controller_ops.reset_secondary_bus) {
222 phb->controller_ops.reset_secondary_bus(dev);
223 return;
224 }
225
226 pci_reset_secondary_bus(dev);
227}
228
229resource_size_t pcibios_default_alignment(void)
230{
231 if (ppc_md.pcibios_default_alignment)
232 return ppc_md.pcibios_default_alignment();
233
234 return 0;
235}
236
237#ifdef CONFIG_PCI_IOV
238resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
239{
240 if (ppc_md.pcibios_iov_resource_alignment)
241 return ppc_md.pcibios_iov_resource_alignment(pdev, resno);
242
243 return pci_iov_resource_size(pdev, resno);
244}
245
246int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
247{
248 if (ppc_md.pcibios_sriov_enable)
249 return ppc_md.pcibios_sriov_enable(pdev, num_vfs);
250
251 return 0;
252}
253
254int pcibios_sriov_disable(struct pci_dev *pdev)
255{
256 if (ppc_md.pcibios_sriov_disable)
257 return ppc_md.pcibios_sriov_disable(pdev);
258
259 return 0;
260}
261
262#endif
263
/*
 * Size of a PHB's IO space.  On 64-bit this is tracked explicitly in
 * pci_io_size; on 32-bit it is the size of the io_resource.
 */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}
272
273int pcibios_vaddr_is_ioport(void __iomem *address)
274{
275 int ret = 0;
276 struct pci_controller *hose;
277 resource_size_t size;
278
279 spin_lock(&hose_spinlock);
280 list_for_each_entry(hose, &hose_list, list_node) {
281 size = pcibios_io_size(hose);
282 if (address >= hose->io_base_virt &&
283 address < (hose->io_base_virt + size)) {
284 ret = 1;
285 break;
286 }
287 }
288 spin_unlock(&hose_spinlock);
289 return ret;
290}
291
/*
 * Translate a CPU physical address inside some PHB's IO window into the
 * IO-port "cookie" usable with inb()/outb() (i.e. relative to _IO_BASE).
 * Returns ~0 if the address belongs to no PHB.
 */
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);
314
315
316
317
318int pci_domain_nr(struct pci_bus *bus)
319{
320 struct pci_controller *hose = pci_bus_to_host(bus);
321
322 return hose->global_number;
323}
324EXPORT_SYMBOL(pci_domain_nr);
325
326
327
328
329
330
331
332
/*
 * Find the host bridge whose device node is @node or an ancestor of it.
 * Returns NULL if none matches.
 *
 * NOTE(review): hose_list is walked here without hose_spinlock;
 * list_for_each_entry_safe() only guards against *this* iterator
 * removing entries, not against concurrent removal — verify callers
 * cannot race with pcibios_free_controller().
 */
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
{
	while(node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}
344
345struct pci_controller *pci_find_controller_for_domain(int domain_nr)
346{
347 struct pci_controller *hose;
348
349 list_for_each_entry(hose, &hose_list, list_node)
350 if (hose->global_number == domain_nr)
351 return hose;
352
353 return NULL;
354}
355
/*
 * Refcounted record of a legacy INTx virq shared by one or more PCI
 * devices; the irq mapping is disposed when the last user goes away.
 */
struct pci_intx_virq {
	int virq;
	struct kref kref;
	struct list_head list_node;
};

/* All live INTx virq records, protected by intx_mutex */
static LIST_HEAD(intx_list);
static DEFINE_MUTEX(intx_mutex);
364
/*
 * kref release callback: unlink the record, dispose the irq mapping and
 * free it.  Runs with intx_mutex held (all kref_put() callers hold it,
 * which protects the list_del()).
 */
static void ppc_pci_intx_release(struct kref *kref)
{
	struct pci_intx_virq *vi = container_of(kref, struct pci_intx_virq, kref);

	list_del(&vi->list_node);
	irq_dispose_mapping(vi->virq);
	kfree(vi);
}
373
/*
 * PCI bus notifier: when a device is removed, drop its reference on the
 * shared INTx virq record (the mapping is disposed on the last put).
 */
static int ppc_pci_unmap_irq_line(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);

	if (action == BUS_NOTIFY_DEL_DEVICE) {
		struct pci_intx_virq *vi;

		mutex_lock(&intx_mutex);
		list_for_each_entry(vi, &intx_list, list_node) {
			if (vi->virq == pdev->irq) {
				kref_put(&vi->kref, ppc_pci_intx_release);
				break;
			}
		}
		mutex_unlock(&intx_mutex);
	}

	return NOTIFY_DONE;
}
394
static struct notifier_block ppc_pci_unmap_irq_notifier = {
	.notifier_call = ppc_pci_unmap_irq_line,
};

/* Register for PCI bus notifications so INTx refs are dropped on removal */
static int ppc_pci_register_irq_notifier(void)
{
	return bus_register_notifier(&pci_bus_type, &ppc_pci_unmap_irq_notifier);
}
arch_initcall(ppc_pci_register_irq_notifier);
404
405
406
407
408
409
/*
 * Reads the interrupt pin to determine if interrupt is used by the card.
 * If the interrupt is used, then gets the interrupt line from the
 * device tree (or PCI config space as a fallback) and stores it in
 * pci_dev->irq, taking a reference on the shared INTx virq record.
 * Returns 0 on success, -1 on failure.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	int virq;
	struct pci_intx_virq *vi, *vitmp;

	/* Preallocate vi as rewind is not possible if an error occurs */
	vi = kzalloc(sizeof(struct pci_intx_virq), GFP_KERNEL);
	if (!vi)
		return -1;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

	/* Try to get a mapping from the device-tree */
	virq = of_irq_parse_and_map_pci(pci_dev, 0, 0);
	if (virq <= 0) {
		u8 line, pin;

		/* If that fails, fall back to what is in config space and
		 * map that through the default controller.  The type is set
		 * to level-low since that is what PCI interrupts are.  A
		 * platform that differs must provide a proper interrupt
		 * tree instead.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			goto error_exit;
		if (pin == 0)
			goto error_exit;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			goto error_exit;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		virq = irq_create_mapping(NULL, line);
		if (virq)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	}

	if (!virq) {
		pr_debug(" Failed to map !\n");
		goto error_exit;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	/* Reuse an existing record for this virq, else install ours */
	mutex_lock(&intx_mutex);
	list_for_each_entry(vitmp, &intx_list, list_node) {
		if (vitmp->virq == virq) {
			kref_get(&vitmp->kref);
			kfree(vi);
			vi = NULL;
			break;
		}
	}
	if (vi) {
		vi->virq = virq;
		kref_init(&vi->kref);
		list_add_tail(&vi->list_node, &intx_list);
	}
	mutex_unlock(&intx_mutex);

	return 0;
error_exit:
	kfree(vi);
	return -1;
}
480
481
482
483
484
/*
 * Adjust vma->vm_pgoff so an IO BAR can be mmap'd: convert the BAR's
 * IO-port cookie back into a CPU physical page offset for this PHB.
 */
int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	resource_size_t ioaddr = pci_resource_start(pdev, bar);

	if (!hose)
		return -EINVAL;

	/* Convert to an offset within this PCI controller */
	ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE;

	vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
	return 0;
}
499
500
501
502
503
504
505pgprot_t pci_phys_mem_access_prot(struct file *file,
506 unsigned long pfn,
507 unsigned long size,
508 pgprot_t prot)
509{
510 struct pci_dev *pdev = NULL;
511 struct resource *found = NULL;
512 resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
513 int i;
514
515 if (page_is_ram(pfn))
516 return prot;
517
518 prot = pgprot_noncached(prot);
519 for_each_pci_dev(pdev) {
520 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
521 struct resource *rp = &pdev->resource[i];
522 int flags = rp->flags;
523
524
525 if ((flags & IORESOURCE_MEM) == 0)
526 continue;
527
528 if (offset < (rp->start & PAGE_MASK) ||
529 offset > rp->end)
530 continue;
531 found = rp;
532 break;
533 }
534 if (found)
535 break;
536 }
537 if (found) {
538 if (found->flags & IORESOURCE_PREFETCH)
539 prot = pgprot_noncached_wc(prot);
540 pci_dev_put(pdev);
541 }
542
543 pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
544 (unsigned long long)offset, pgprot_val(prot));
545
546 return prot;
547}
548
549
/* This provides legacy IO read access on a bus */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus.  We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here.  So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	/* NOTE(review): rp->end is inclusive, so this also rejects a
	 * transfer ending exactly on the last byte — verify intended. */
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch(size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}
588
589
/* This provides legacy IO write access on a bus */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus.  We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here.  So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* The generic legacy-IO code always hands the value to us as a
	 * u32, with the payload in the *high* bytes for sizes 1 and 2 —
	 * hence the shifts below to extract the right part.
	 */
	switch(size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}
633
634
/* This provides legacy IO or memory mmap access on a bus */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* If the range doesn't exist on this PHB, fake it with
		 * anonymous memory (like /dev/zero) instead of failing:
		 * some userspace (X) cannot cope with an mmap error on
		 * legacy memory and would refuse to start otherwise.
		 */
		if ((offset + size) > hose->isa_mem_size) {
			printk(KERN_DEBUG
			       "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
			       current->comm, current->pid, pci_domain_nr(bus), bus->number);
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		/* Validate the IO port range against the PHB's IO window */
		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
686
/*
 * Convert a device resource to the values exposed to userspace in sysfs.
 * IO resources are reported as bus-relative port numbers; memory
 * resources are reported as CPU physical addresses unchanged.
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_bus_region region;

	if (rsrc->flags & IORESOURCE_IO) {
		pcibios_resource_to_bus(dev->bus, &region,
					(struct resource *) rsrc);
		*start = region.start;
		*end = region.end;
		return;
	}

	/* Memory: pass through the CPU physical address as-is */
	*start = rsrc->start;
	*end = rsrc->end;
}
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * This function will parse the "ranges" property of a PCI host bridge
 * device node and set up the resource mapping of the controller based on
 * its content.  One IO range and up to three memory ranges are supported.
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	printk(KERN_INFO "PCI host bridge %pOF %s ranges:\n",
	       dev, primary ? "(primary)" : "");

	/* Check for ranges property */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	/* Parse it */
	for_each_of_pci_range(&parser, &range) {
		/* Skip ranges that failed translation or are zero-sized
		 * (some firmware exposes nonsensical zero-sized regions,
		 * e.g. attempts at describing the VGA memory hole).
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		/* Act based on address space type */
		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			printk(KERN_INFO
			       " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
#ifdef CONFIG_PPC32
			/* On 32 bits, limit I/O space to 16MB */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(range.cpu_addr,
						range.size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
#endif
			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0, so we factor in pci_addr
			 */
			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			range.cpu_addr = range.pci_addr;
			break;
		case IORESOURCE_MEM:
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr,
			       (range.flags & IORESOURCE_PREFETCH) ?
			       "Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* A pci_addr of 0 marks the ISA memory hole */
			if (range.pci_addr == 0) {
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* Build resource */
			hose->mem_offset[memno] = range.cpu_addr -
						  range.pci_addr;
			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->flags = range.flags;
			res->start = range.cpu_addr;
			res->end = range.cpu_addr + range.size - 1;
			res->parent = res->child = res->sibling = NULL;
		}
	}
}
837
838
839int pci_proc_domain(struct pci_bus *bus)
840{
841 struct pci_controller *hose = pci_bus_to_host(bus);
842
843 if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
844 return 0;
845 if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
846 return hose->global_number != 0;
847 return 1;
848}
849
850int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
851{
852 if (ppc_md.pcibios_root_bridge_prepare)
853 return ppc_md.pcibios_root_bridge_prepare(bridge);
854
855 return 0;
856}
857
858
859
860
/* This header fixup will do the resource fixup for all devices as they
 * are probed, but not for bridge ranges.
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		printk(KERN_ERR "No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}

	/* VFs inherit their resources from the PF */
	if (dev->is_virtfn)
		return;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		struct pci_bus_region reg;
		if (!res->flags)
			continue;

		/* If we're going to re-assign everything, we mark all
		 * resources as unset (and 0-base them).  In addition, we
		 * mark BARs starting at 0 as unset as well, except if
		 * PCI_PROBE_ONLY is also set, since in that case we
		 * don't want to re-assign anything.
		 */
		pcibios_resource_to_bus(dev->bus, &reg, res);
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
			/* Only print message if not re-assigning */
			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
				pr_debug("PCI:%s Resource %d %pR is unassigned\n",
					 pci_name(dev), i, res);
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
	}

	/* Call machine specific resource fixup */
	if (ppc_md.pcibios_fixup_resources)
		ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
907
908
909
910
911
912
/*
 * Heuristic: decide whether a P2P bridge window was left uninitialized
 * by firmware and should therefore be cleared and reassigned later.
 * Returns non-zero if the resource should be treated as unassigned.
 */
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
						 struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	struct pci_bus_region region;
	u16 command;
	int i;

	/* We don't do anything if PCI_PROBE_ONLY is set */
	if (pci_has_flag(PCI_PROBE_ONLY))
		return 0;

	/* Job is a bit different between memory and IO */
	if (res->flags & IORESOURCE_MEM) {
		pcibios_resource_to_bus(dev->bus, &region, res);

		/* If the BAR is non-0 then it's probably been initialized */
		if (region.start != 0)
			return 0;

		/* The BAR is 0, let's check if memory decoding is enabled
		 * on the bridge.  If not, we consider it unassigned.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/* Memory decoding is enabled and the BAR is 0.  If any of
		 * the PHB windows starts at bus address 0, a 0-based window
		 * may be legitimate, so keep it.
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->mem_offset[i])
				return 0;
		}

		/* Well, it starts at 0 and we know it will collide, so we
		 * may as well consider it as unassigned.
		 */
		return 1;
	} else {
		/* If the (bus-relative) base is non-0 we consider it assigned */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/* Unlike memory, IO space starting at low addresses -is-
		 * typically valid.  Instead we consider as unassigned
		 * anything that doesn't have IO decoding enabled in the
		 * PCI command register.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* We consider it unassigned */
		return 1;
	}
}
977
978
/* Fixup resources of a PCI<->PCI bridge */
static void pcibios_fixup_bridge(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	struct pci_dev *dev = bus->self;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags)
			continue;
		/* Skip extra windows of transparent bridges */
		if (i >= 3 && bus->self->transparent)
			continue;

		/* If we're going to reassign everything, we clear the
		 * resource so it gets re-sized and re-placed later.
		 */
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = -1;
			continue;
		}

		pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);

		/* Try to detect uninitialized P2P bridge resources and
		 * clear them out so they get re-assigned later.
		 */
		if (pcibios_uninitialized_bridge_resource(bus, res)) {
			res->flags = 0;
			pr_debug("PCI:%s (unassigned)\n", pci_name(dev));
		}
	}
}
1014
/* Per-bus fixups: bridge windows, platform quirks and bus DMA setup. */
void pcibios_setup_bus_self(struct pci_bus *bus)
{
	struct pci_controller *phb;

	/* Fix up the bus resources for P2P bridges */
	if (bus->self != NULL)
		pcibios_fixup_bridge(bus);

	/* Platform specific bus fixups */
	if (ppc_md.pcibios_fixup_bus)
		ppc_md.pcibios_fixup_bus(bus);

	/* Setup bus DMA mappings */
	phb = pci_bus_to_host(bus);
	if (phb->controller_ops.dma_bus_setup)
		phb->controller_ops.dma_bus_setup(bus);
}
1034
/* Per-device setup run when a device is added to its bus: NUMA node,
 * DMA ops, platform DMA/iommu setup and IRQ wiring. */
void pcibios_bus_add_device(struct pci_dev *dev)
{
	struct pci_controller *phb;

	/* Fixup NUMA node as it may not be setup yet by the generic
	 * code and is needed by the DMA init.
	 */
	set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

	/* Hook up default DMA ops */
	set_dma_ops(&dev->dev, pci_dma_ops);
	dev->dev.archdata.dma_offset = PCI_DRAM_OFFSET;

	/* Additional platform DMA/iommu setup */
	phb = pci_bus_to_host(dev->bus);
	if (phb->controller_ops.dma_dev_setup)
		phb->controller_ops.dma_dev_setup(dev);

	/* Read default IRQs and fixup if necessary */
	pci_read_irq_line(dev);
	if (ppc_md.pci_irq_fixup)
		ppc_md.pci_irq_fixup(dev);

	if (ppc_md.pcibios_bus_add_device)
		ppc_md.pcibios_bus_add_device(dev);
}
1060
/* Arch hook on device discovery; gives the platform a chance to fix up
 * SR-IOV state.  Always succeeds. */
int pcibios_add_device(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	if (ppc_md.pcibios_fixup_sriov)
		ppc_md.pcibios_fixup_sriov(dev);
#endif

	return 0;
}
1070
void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling on powerpc */
}
1075
void pcibios_fixup_bus(struct pci_bus *bus)
{
	/* When called from the generic PCI probe, read PCI<->PCI bridge
	 * bases.  This is -not- called when generating the PCI tree from
	 * the OF device-tree.
	 */
	pci_read_bridge_bases(bus);

	/* Now fixup the bus itself */
	pcibios_setup_bus_self(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);
1088
1089static int skip_isa_ioresource_align(struct pci_dev *dev)
1090{
1091 if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
1092 !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1093 return 1;
1094 return 0;
1095}
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
/*
 * We need to avoid collisions with `mirrored' VGA ports and other
 * strange ISA hardware, so we always want IO addresses allocated in
 * the 0x000-0x0ff region modulo 0x400: some external IO cards only
 * decode the low 10 bits of the IO address, so e.g. 0x2900-0x2bff
 * might be mirrored at 0x0100-0x03ff.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		if (skip_isa_ioresource_align(dev))
			return start;
		/* Bump out of the 0x100-0x3ff mirror zone to the next 1K */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}

	return start;
}
EXPORT_SYMBOL(pcibios_align_resource);
1126
1127
1128
1129
1130
/*
 * Reparent resource children of @parent that conflict with @res under
 * @res, and insert @res in their place.  Returns 0 on success, -1 when
 * @res does not cleanly contain the conflicting children.
 */
static int reparent_resources(struct resource *parent,
			      struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;	/* first child inside res */
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	/* Splice res into the sibling chain, adopting the contained run */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s %pR under %s\n",
			 p->name, p, res->name);
	}
	return 0;
}
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
/*
 * Request the firmware-assigned resources of a bus (and, recursively,
 * its children) into the resource tree.  Conflicting or unset resources
 * are cleared out so they get reassigned later.
 */
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags || res->start > res->end || res->parent)
			continue;

		/* If the resource was left unset at this point, we clear it */
		if (res->flags & IORESOURCE_UNSET)
			goto clear_resource;

		if (bus->parent == NULL)
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/* this happens when the generic PCI
				 * code (wrongly) decides that this
				 * bridge is transparent
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
			 bus->self ? pci_name(bus->self) : "PHB", bus->number,
			 i, res, pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			struct pci_dev *dev = bus->self;

			if (request_resource(pr, res) == 0)
				continue;

			/* Must be a conflict with an existing entry.
			 * Move that entry (or entries) under the bridge
			 * resource and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;

			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
			    pci_claim_bridge_resource(dev,
						i + PCI_BRIDGE_RESOURCES) == 0)
				continue;
		}
		pr_warn("PCI: Cannot allocate resource region %d of PCI bridge %d, will remap\n",
			i, bus->number);
	clear_resource:
		/* The resource might be figured out later, when
		 * reassigning based on what downstream devices need.
		 * Clear it so that can happen.
		 */
		res->start = 0;
		res->end = -1;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}
1265
/*
 * Request a single device BAR from its parent resource; on failure,
 * mark the BAR unset and zero-base it so it gets reassigned later.
 */
static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
		 pci_name(dev), idx, r);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
		       " of device %s, will remap\n", idx, pci_name(dev));
		if (pr)
			pr_debug("PCI: parent is %p: %pR\n", pr, pr);

		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}
1286
/*
 * Two-pass BAR allocation: pass 0 claims resources of devices whose
 * decode is enabled, pass 1 the disabled ones.  Pass 0 also switches
 * off any expansion ROM that firmware left enabled.
 */
static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			/* NOTE(review): this store is dead — it is always
			 * overwritten by the if/else below; verify whether
			 * "else if" was intended for the ROM case. */
			if (idx == PCI_ROM_RESOURCE )
				disabled = 1;
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags) {
			/* Turn the ROM off, leave the resource region,
			 * but keep it unregistered.
			 */
			u32 reg;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			if (reg & PCI_ROM_ADDRESS_ENABLE) {
				pr_debug("PCI: Switching off ROM of %s\n",
					 pci_name(dev));
				r->flags &= ~IORESOURCE_ROM_ENABLE;
				pci_write_config_dword(dev, dev->rom_base_reg,
						       reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}
1333
/*
 * Reserve the legacy IO window (first 4K of ports) and the VGA memory
 * window (0xa0000-0xbffff) of each PHB so that nothing gets assigned
 * on top of them.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory: find a window covering the VGA hole */
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		offset = hose->mem_offset[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}
1389
/*
 * Top-level resource survey: allocate firmware-assigned resources,
 * reserve legacy regions and assign whatever is still unassigned.
 */
void __init pcibios_resource_survey(void)
{
	struct pci_bus *b;

	/* Allocate and assign resources */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_allocate_bus_resources(b);
	if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
		pcibios_allocate_resources(0);
		pcibios_allocate_resources(1);
	}

	/* Before we start assigning unassigned resources, we try to
	 * reserve the low IO area and the VGA memory area if they
	 * intersect the bus available resources, to avoid allocating
	 * things on top of them.
	 */
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		list_for_each_entry(b, &pci_root_buses, node)
			pcibios_reserve_legacy_regions(b);
	}

	/* Now proceed to assigning things that were left unassigned */
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		pr_debug("PCI: Assigning unassigned resources...\n");
		pci_assign_unassigned_resources();
	}
}
1419
1420
1421
1422
1423
1424
/*
 * Used by the PCI hotplug driver to claim the resources of newly
 * plugged buses: claim every assigned device BAR on the bus, then
 * recurse into child buses.
 */
void pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;

			pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
				 pci_name(dev), i, r);

			if (pci_claim_resource(dev, i) == 0)
				continue;

			pci_claim_bridge_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1453
1454
1455
1456
1457
1458
1459
1460
/* pcibios_finish_adding_to_bus
 *
 * This is to be called by the hotplug code after devices have been
 * added to a bus; this includes calling it for a PHB that is just
 * being added.
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and devices resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		if (bus->self)
			pci_assign_unassigned_bridge_resources(bus->self);
		else
			pci_assign_unassigned_bus_resources(bus);
	}

	/* Add new devices to global lists.  Register in proc, sysfs. */
	pci_bus_add_devices(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1480
1481int pcibios_enable_device(struct pci_dev *dev, int mask)
1482{
1483 struct pci_controller *phb = pci_bus_to_host(dev->bus);
1484
1485 if (phb->controller_ops.enable_device_hook)
1486 if (!phb->controller_ops.enable_device_hook(dev))
1487 return -EINVAL;
1488
1489 return pci_enable_resources(dev, mask);
1490}
1491
1492void pcibios_disable_device(struct pci_dev *dev)
1493{
1494 struct pci_controller *phb = pci_bus_to_host(dev->bus);
1495
1496 if (phb->controller_ops.disable_device)
1497 phb->controller_ops.disable_device(dev);
1498}
1499
/* Offset between IO-port cookies (_IO_BASE relative) and this PHB's
 * virtual IO mapping. */
resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
{
	return (unsigned long) hose->io_base_virt - _IO_BASE;
}
1504
/*
 * Populate the host bridge's resource list from its IO and memory
 * windows, applying the CPU<->PCI address offsets.
 */
static void pcibios_setup_phb_resources(struct pci_controller *hose,
					struct list_head *resources)
{
	struct resource *res;
	resource_size_t offset;
	int i;

	/* Hookup PHB IO resource */
	res = &hose->io_resource;

	if (!res->flags) {
		pr_debug("PCI: I/O resource not set for host"
			 " bridge %pOF (domain %d)\n",
			 hose->dn, hose->global_number);
	} else {
		offset = pcibios_io_space_offset(hose);

		pr_debug("PCI: PHB IO resource = %pR off 0x%08llx\n",
			 res, (unsigned long long)offset);
		pci_add_resource_offset(resources, res, offset);
	}

	/* Hookup PHB Memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags)
			continue;

		offset = hose->mem_offset[i];
		pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
			 res, (unsigned long long)offset);

		pci_add_resource_offset(resources, res, offset);
	}
}
1540
1541
1542
1543
1544
/*
 * Null PCI config-space accessors: always report "device not found".
 * Used (via null_pci_ops) when no hose can be found for a bus.
 */
#define NULL_PCI_OP(rw, size, type) \
static int \
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
{ \
	return PCIBIOS_DEVICE_NOT_FOUND; \
}
1551
/* Bus-level config read stub: every access fails with "no device". */
static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}
1558
/* Bus-level config write stub: every access fails with "no device". */
static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}
1565
/* pci_ops used by fake_pci_bus() when no hose is known: all accesses fail. */
static struct pci_ops null_pci_ops =
{
	.read = null_read_config,
	.write = null_write_config,
};
1571
1572
1573
1574
1575
1576static struct pci_bus *
1577fake_pci_bus(struct pci_controller *hose, int busnr)
1578{
1579 static struct pci_bus bus;
1580
1581 if (hose == NULL) {
1582 printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1583 }
1584 bus.number = busnr;
1585 bus.sysdata = hose;
1586 bus.ops = hose? hose->ops: &null_pci_ops;
1587 return &bus;
1588}
1589
/*
 * Early config-space accessors, usable before PCI scanning has
 * created real pci_dev/pci_bus structures.  They route through
 * fake_pci_bus() to reach the hose's pci_ops.
 */
#define EARLY_PCI_OP(rw, size, type) \
int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
			       int devfn, int offset, type value) \
{ \
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
					    devfn, offset, value); \
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
1604
/* Early (pre-scan) capability lookup, via the fake early-boot bus. */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
1610
/*
 * Return the device-tree node of the host bridge owning @bus.
 * Takes a reference on the node; caller is responsible for
 * the eventual of_node_put().
 */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;

	return of_node_get(hose->dn);
}
1617
1618
1619
1620
1621
1622void pcibios_scan_phb(struct pci_controller *hose)
1623{
1624 LIST_HEAD(resources);
1625 struct pci_bus *bus;
1626 struct device_node *node = hose->dn;
1627 int mode;
1628
1629 pr_debug("PCI: Scanning PHB %pOF\n", node);
1630
1631
1632 pcibios_setup_phb_io_space(hose);
1633
1634
1635 pcibios_setup_phb_resources(hose, &resources);
1636
1637 hose->busn.start = hose->first_busno;
1638 hose->busn.end = hose->last_busno;
1639 hose->busn.flags = IORESOURCE_BUS;
1640 pci_add_resource(&resources, &hose->busn);
1641
1642
1643 bus = pci_create_root_bus(hose->parent, hose->first_busno,
1644 hose->ops, hose, &resources);
1645 if (bus == NULL) {
1646 pr_err("Failed to create bus for PCI domain %04x\n",
1647 hose->global_number);
1648 pci_free_resource_list(&resources);
1649 return;
1650 }
1651 hose->bus = bus;
1652
1653
1654 mode = PCI_PROBE_NORMAL;
1655 if (node && hose->controller_ops.probe_mode)
1656 mode = hose->controller_ops.probe_mode(bus);
1657 pr_debug(" probe mode: %d\n", mode);
1658 if (mode == PCI_PROBE_DEVTREE)
1659 of_scan_bus(node, bus);
1660
1661 if (mode == PCI_PROBE_NORMAL) {
1662 pci_bus_update_busn_res_end(bus, 255);
1663 hose->last_busno = pci_scan_child_bus(bus);
1664 pci_bus_update_busn_res_end(bus, hose->last_busno);
1665 }
1666
1667
1668
1669
1670 if (ppc_md.pcibios_fixup_phb)
1671 ppc_md.pcibios_fixup_phb(hose);
1672
1673
1674 if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
1675 struct pci_bus *child;
1676 list_for_each_entry(child, &bus->children, node)
1677 pcie_bus_configure_settings(child);
1678 }
1679}
1680EXPORT_SYMBOL_GPL(pcibios_scan_phb);
1681
1682static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
1683{
1684 int i, class = dev->class >> 8;
1685
1686 int prog_if = dev->class & 0xf;
1687
1688 if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
1689 class == PCI_CLASS_BRIDGE_OTHER) &&
1690 (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
1691 (prog_if == 0) &&
1692 (dev->bus->parent == NULL)) {
1693 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1694 dev->resource[i].start = 0;
1695 dev->resource[i].end = 0;
1696 dev->resource[i].flags = 0;
1697 }
1698 }
1699}
1700DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1701DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1702
1703
/*
 * Discover the platform's PCI host bridges at boot, via the
 * machine-description hook when one is provided.
 */
static int __init discover_phbs(void)
{
	if (ppc_md.discover_phbs)
		ppc_md.discover_phbs();

	return 0;
}
core_initcall(discover_phbs);
1712