/*
 * PCI bus probing and device/bridge setup.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176
#define CARDBUS_RESERVE_BUSNR	3

LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

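/*
 * no_pci_devices - report whether any PCI device has been found yet
 *
 * Returns true when nothing is registered on pci_bus_type, which callers
 * use to tell whether PCI has been initialised at all.
 */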
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

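/*
 * PCI bus class devices: sysfs attributes exposing the CPU affinity of a
 * bus, in mask form (cpuaffinity) and list form (cpulistaffinity).
 */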
static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
					int type,
					struct device_attribute *attr,
					char *buf)
{
	int ret;
	const struct cpumask *cpumask;

	cpumask = cpumask_of_pcibus(to_pci_bus(dev));
	ret = type ?
		cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask) :
		cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask);
	buf[ret++] = '\n';
	buf[ret] = '\0';
	return ret;
}

static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
}

static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return pci_bus_show_cpuaffinity(dev, 1, attr, buf);
}

DEVICE_ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpumaskaffinity, NULL);
DEVICE_ATTR(cpulistaffinity, S_IRUGO, pci_bus_show_cpulistaffinity, NULL);

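/*
 * Release callback for the pci_bus class: drop the reference on the bridge
 * device taken when the bus was created, then free the pci_bus itself.
 */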
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	if (pci_bus->bridge)
		put_device(pci_bus->bridge);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

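/*
 * Translate the low bits of a BAR value into IORESOURCE_* flags.
 */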
static inline unsigned int pci_calc_resource_flags(unsigned int flags)
{
	if (flags & PCI_BASE_ADDRESS_SPACE_IO)
		return IORESOURCE_IO;

	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		return IORESOURCE_MEM | IORESOURCE_PREFETCH;

	return IORESOURCE_MEM;
}

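/*
 * pci_size - compute how much address space a BAR decodes, given the value
 * read back (maxbase) after writing all ones to it.
 *
 * Worked example (illustrative, not from the original source): a 1 MB
 * 32-bit memory BAR reads back maxbase = 0xfff00000; with
 * mask = 0xfffffff0, size = 0xfff00000, the lowest set bit yields
 * 0x00100000, and the function returns 0x000fffff, i.e. the extent of a
 * 1 MB decode (end = base + 0xfffff).
 */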
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all ones.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}

static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar)
{
	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		res->flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		return pci_bar_io;
	}

	res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;

	if (res->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
		return pci_bar_mem64;
	return pci_bar_mem32;
}

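/**
 * __pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */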
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
			struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;

	mask = type ? ~PCI_ROM_ADDRESS_ENABLE : ~0;

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR,
	 * bit 1 must be clear.
	 */
	if (!sz || sz == 0xffffffff)
		goto fail;

	/*
	 * A base register reading back as all ones is clearly bogus;
	 * treat the base as zero rather than propagating it.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		type = decode_bar(res, l);
		res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
		if (type == pci_bar_io) {
			l &= PCI_BASE_ADDRESS_IO_MASK;
			mask = PCI_BASE_ADDRESS_IO_MASK & 0xffff;
		} else {
			l &= PCI_BASE_ADDRESS_MEM_MASK;
			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l &= PCI_ROM_ADDRESS_MASK;
		mask = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (type == pci_bar_mem64) {
		u64 l64 = l;
		u64 sz64 = sz;
		u64 mask64 = mask | (u64)~0 << 32;

		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);

		sz64 = pci_size(l64, sz64, mask64);

		if (!sz64)
			goto fail;

		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
			dev_err(&dev->dev, "can't handle 64-bit BAR\n");
			goto fail;
		} else if ((sizeof(resource_size_t) < 8) && l) {
			/* Address above 32-bit boundary; disable the BAR */
			pci_write_config_dword(dev, pos, 0);
			pci_write_config_dword(dev, pos + 4, 0);
			res->start = 0;
			res->end = sz64;
		} else {
			res->start = l64;
			res->end = l64 + sz64;
			dev_printk(KERN_DEBUG, &dev->dev,
				"reg %x 64bit mmio: %pR\n", pos, res);
		}
	} else {
		sz = pci_size(l, sz, mask);

		if (!sz)
			goto fail;

		res->start = l;
		res->end = l + sz;

		dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos,
			(res->flags & IORESOURCE_IO) ? "io port" : "32bit mmio",
			res);
	}

 out:
	return (type == pci_bar_mem64) ? 1 : 0;
 fail:
	res->flags = 0;
	goto out;
}

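/*
 * pci_read_bases - probe the first @howmany BARs of @dev and, if @rom is
 * non-zero, the expansion ROM register at that config offset.  A 64-bit
 * BAR occupies two slots, so the loop advances past the upper half.
 */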
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
				IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

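/*
 * pci_read_bridge_bases - read the I/O, memory and prefetchable memory
 * windows of a PCI-to-PCI bridge and fill in the child bus resources.
 */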
void __devinit pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct resource *res;
	int i;

	if (!child->parent)	/* It's a host bus, nothing to read */
		return;

	if (dev->transparent) {
		dev_info(&dev->dev, "transparent bridge\n");
		for (i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
			child->resource[i] = child->parent->resource[i - 3];
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
	limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;
		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= (io_base_hi << 16);
		limit |= (io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		if (!res->start)
			res->start = base;
		if (!res->end)
			res->end = limit + 0xfff;
		dev_printk(KERN_DEBUG, &dev->dev, "bridge io port: %pR\n", res);
	}

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		res->start = base;
		res->end = limit + 0xfffff;
		dev_printk(KERN_DEBUG, &dev->dev, "bridge 32bit mmio: %pR\n",
			res);
	}

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;
		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set base > limit by default, and some BIOSes
		 * leave the upper halves uninitialized.  In either case the
		 * window is treated as unused.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((long) mem_base_hi) << 32;
			limit |= ((long) mem_limit_hi) << 32;
#else
			if (mem_base_hi || mem_limit_hi) {
				dev_err(&dev->dev, "can't handle 64-bit "
					"address space for bridge\n");
				return;
			}
#endif
		}
	}
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
		res->start = base;
		res->end = limit + 0xfffff;
		dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
			(res->flags & PCI_PREF_RANGE_TYPE_64) ? "64" : "32",
			res);
	}
}

static struct pci_bus *pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (b) {
		INIT_LIST_HEAD(&b->node);
		INIT_LIST_HEAD(&b->children);
		INIT_LIST_HEAD(&b->devices);
		INIT_LIST_HEAD(&b->slots);
	}
	return b;
}

static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;

	/*
	 * Allocate a new bus and inherit stuff from the parent.
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/*
	 * Initialize some portions of the bus device, but don't register
	 * it now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->secondary = busnr;
	child->primary = parent->secondary;
	child->subordinate = 0xff;

	if (!bridge)
		return child;

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);

	/* Set up default resource pointers and names. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

	return child;
}

struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}

static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
{
	struct pci_bus *parent = child->parent;

	/*
	 * Only widen the parents' subordinate numbers when the platform
	 * re-assigns all bus numbers; otherwise trust the firmware setup.
	 */
	if (!pcibios_assign_all_busses())
		return;

	while (parent->parent && parent->subordinate < max) {
		parent->subordinate = max;
		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
		parent = parent->parent;
	}
}

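/*
 * pci_scan_bridge - scan the bus behind a PCI-to-PCI or CardBus bridge.
 *
 * Bridges are handled in two passes: pass 0 scans buses that firmware has
 * already configured, pass 1 assigns bus numbers to the remaining bridges.
 * For CardBus bridges, CARDBUS_RESERVE_BUSNR extra bus numbers are kept
 * free for cards that themselves carry a PCI-to-PCI bridge.
 * Returns the highest subordinate bus number found.
 */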
int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	int broken = 0;

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);

	dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n",
		buses & 0xffffff, pass);

	/* Check if setup is sensible at all */
	if (!pass &&
	    ((buses & 0xff) != bus->number || ((buses >> 8) & 0xff) <= bus->number)) {
		dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (on some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus && !broken) {
		unsigned int cmax, busnr;
		/*
		 * Bus already configured by firmware: process it in the
		 * first pass and just note the configuration.
		 */
		if (pass)
			goto out;
		busnr = (buses >> 8) & 0xFF;

		/*
		 * If we already got to this bus through a different bridge,
		 * don't re-add it; keep descending so any buses behind it
		 * are still scanned.
		 */
		child = pci_find_bus(pci_domain_nr(bus), busnr);
		if (!child) {
			child = pci_add_new_bus(bus, dev, busnr);
			if (!child)
				goto out;
			child->primary = buses & 0xFF;
			child->subordinate = (buses >> 16) & 0xFF;
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > max)
			max = cmax;
		if (child->subordinate > max)
			max = child->subordinate;
	} else {
		/*
		 * We need to assign a number to this bus, which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		   This can happen when a bridge is hot-plugged. */
		if (pci_find_bus(pci_domain_nr(bus), max+1))
			goto out;
		child = pci_add_new_bus(bus, dev, ++max);
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->secondary)   <<  8)
		      | ((unsigned int)(child->subordinate) << 16);

		/*
		 * For CardBus bridges, also program the latency timer held
		 * in the top byte of this register.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			/*
			 * Adjust subordinate busnr in parent buses.
			 * We do this before scanning for children because
			 * some devices may not be detected if the BIOS
			 * was lazy.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
			/* Now we can scan all subordinate buses... */
			max = pci_scan_child_bus(child);
			/*
			 * Now fix it up again since we have found
			 * the real value of max.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
		} else {
			/*
			 * For CardBus bridges, leave some bus numbers free
			 * in case cards with a PCI-to-PCI bridge are
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
							max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->subordinate > max) &&
					    (parent->subordinate <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often there are two CardBus
					 * bridges -- try to leave one valid
					 * bus number for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
			pci_fixup_parent_subordinate_busnr(child, max);
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		child->subordinate = max;
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Report child buses that are not contained in a parent's range */
	while (bus->parent) {
		if ((child->subordinate > bus->subordinate) ||
		    (child->number > bus->subordinate) ||
		    (child->number < bus->number) ||
		    (child->subordinate < bus->number)) {
			pr_debug("PCI: Bus #%02x (-#%02x) is %s "
				"hidden behind%s bridge #%02x (-#%02x)\n",
				child->number, child->subordinate,
				(bus->number > child->subordinate &&
				 bus->subordinate < child->number) ?
					"wholly" : "partially",
				bus->self->transparent ? " transparent" : "",
				bus->number, bus->subordinate);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}

/*
 * Read the interrupt pin and, if one is wired up, the interrupt line.
 * Architecture-specific code may adjust dev->irq later.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

static void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->is_pcie = 1;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

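/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Returns 0 on success and -EIO if the header type cannot be read or is
 * unknown.
 */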
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u8 hdr_type;
	struct pci_slot *slot;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	list_for_each_entry(slot, &dev->bus->slots, list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;

	/* Assume 32-bit DMA by default; capable devices can raise this
	   later if the system supports it. */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	class >>= 8;				    /* upper 3 bytes */
	dev->class = class;
	class >>= 8;

	dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
		 dev->vendor, dev->device, class, dev->hdr_type);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	class = dev->class >> 8;

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Handle legacy mode ATA controllers here rather than in
		 * quirk code: they have fixed addresses which are not
		 * always echoed in BAR0-3, and BAR0-3 may contain junk.
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				dev->resource[0].start = 0x1F0;
				dev->resource[0].end = 0x1F7;
				dev->resource[0].flags = LEGACY_IO_RESOURCE;
				dev->resource[1].start = 0x3F6;
				dev->resource[1].end = 0x3F6;
				dev->resource[1].flags = LEGACY_IO_RESOURCE;
			}
			if ((progif & 4) == 0) {
				dev->resource[2].start = 0x170;
				dev->resource[2].end = 0x177;
				dev->resource[2].flags = LEGACY_IO_RESOURCE;
				dev->resource[3].start = 0x376;
				dev->resource[3].end = 0x376;
				dev->resource[3].flags = LEGACY_IO_RESOURCE;
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* A subtractive-decode (transparent) bridge is identified
		   by a programming interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, "
			"ignoring device\n", dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %02x (doesn't match header "
			"type %02x)\n", class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
}

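/**
 * pci_release_dev - free a pci device structure when all users are done
 * @dev: device that's been disconnected
 *
 * Called only by the device core once the last reference to this pci
 * device has been dropped.
 */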
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	kfree(pci_dev);
}

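/**
 * pci_cfg_space_size_ext - check whether extended config space is usable
 * @dev: PCI device
 *
 * PCI-X 2.0 and PCI Express devices have 4096 bytes of config space;
 * everything else has 256.  Even a capable device may not be reachable
 * with extended accesses on a given platform, so probe the first dword
 * above the legacy 256-byte space before trusting it.
 */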
int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		goto fail;
	if (status == 0xffffffff)
		goto fail;

	return PCI_CFG_SPACE_EXP_SIZE;

 fail:
	return PCI_CFG_SPACE_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!pos) {
		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
		if (!pos)
			goto fail;

		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
			goto fail;
	}

	return pci_cfg_space_size_ext(dev);

 fail:
	return PCI_CFG_SPACE_SIZE;
}

static void pci_release_bus_bridge_dev(struct device *dev)
{
	kfree(dev);
}

struct pci_dev *alloc_pci_dev(void)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->bus_list);

	return dev;
}
EXPORT_SYMBOL(alloc_pci_dev);

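/*
 * pci_scan_device - read the config header at @devfn, sanity-check it and,
 * if a device is present, allocate and fill in a pci_dev for it.
 */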
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
		return NULL;

	/* Some broken boards return 0 or ~0 if a slot is empty: */
	if (l == 0xffffffff || l == 0x00000000 ||
	    l == 0x0000ffff || l == 0xffff0000)
		return NULL;

	/* Configuration Request Retry Status: wait for the device to settle */
	while (l == 0xffff0001) {
		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
			return NULL;
		/* Card hasn't responded in 60 seconds?  Give up. */
		if (delay > 60 * 1000) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
					"responding\n", pci_domain_nr(bus),
					bus->number, PCI_SLOT(devfn),
					PCI_FUNC(devfn));
			return NULL;
		}
	}

	dev = alloc_pci_dev();
	if (!dev)
		return NULL;

	dev->bus = bus;
	dev->devfn = devfn;
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	if (pci_setup_device(dev)) {
		kfree(dev);
		return NULL;
	}

	return dev;
}

static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);
	platform_pci_wakeup_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_enable_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);
}

void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;
	pci_dev_get(dev);

	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);
}

struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_get_slot(bus, devfn);
	if (dev) {
		/* Already known; drop the extra reference and return it. */
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);

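/**
 * pci_scan_slot - scan a PCI slot on a bus for devices
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (function 0)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding discovered
 * devices to the @bus->devices list.
 *
 * Returns the number of new devices found.
 */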
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	int fn, nr = 0;
	struct pci_dev *dev;

	dev = pci_scan_single_device(bus, devfn);
	if (dev && !dev->is_added)	/* new device? */
		nr++;

	if ((dev && dev->multifunction) ||
	    (!dev && pcibios_scan_all_fns(bus, devfn))) {
		for (fn = 1; fn < 8; fn++) {
			dev = pci_scan_single_device(bus, devfn + fn);
			if (dev) {
				if (!dev->is_added)
					nr++;
				dev->multifunction = 1;
			}
		}
	}

	/* Initialize ASPM state on the bridge leading to this slot */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}

unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->secondary;
	struct pci_dev *dev;

	pr_debug("PCI: Scanning bus %04x:%02x\n", pci_domain_nr(bus), bus->number);

	/* Scan every slot (device number) on this bus */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		pr_debug("PCI: Fixups for bus %04x:%02x\n",
			 pci_domain_nr(bus), bus->number);
		pcibios_fixup_bus(bus);
		if (pci_is_root_bus(bus))
			bus->is_added = 1;
	}

	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus, so we know all about what's on the other
	 * side of any bridges that may be on this bus, plus any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	pr_debug("PCI: Bus scan for %04x:%02x returning with max=%02x\n",
		pci_domain_nr(bus), bus->number, max);
	return max;
}

struct pci_bus *pci_create_bus(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	int error;
	struct pci_bus *b;
	struct device *dev;

	b = pci_alloc_bus();
	if (!b)
		return NULL;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		kfree(b);
		return NULL;
	}

	b->sysdata = sysdata;
	b->ops = ops;

	if (pci_find_bus(pci_domain_nr(b), bus)) {
		/* Already reached via a different bridge; ignore it. */
		pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus);
		goto err_out;
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	dev->parent = parent;
	dev->release = pci_release_bus_bridge_dev;
	dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(dev);
	if (error)
		goto dev_reg_err;
	b->bridge = get_device(dev);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;
	error = device_create_file(&b->dev, &dev_attr_cpuaffinity);
	if (error)
		goto dev_create_file_err;

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	b->number = b->secondary = bus;
	b->resource[0] = &ioport_resource;
	b->resource[1] = &iomem_resource;

	return b;

dev_create_file_err:
	device_unregister(&b->dev);
class_dev_reg_err:
	device_unregister(dev);
dev_reg_err:
	down_write(&pci_bus_sem);
	list_del(&b->node);
	up_write(&pci_bus_sem);
err_out:
	kfree(dev);
	kfree(b);
	return NULL;
}

struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	struct pci_bus *b;

	b = pci_create_bus(parent, bus, ops, sysdata);
	if (b)
		b->subordinate = pci_scan_child_bus(b);
	return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);

#ifdef CONFIG_HOTPLUG

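/**
 * pci_rescan_bus - scan a PCI bus for devices
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, assign them resources,
 * and enable them.
 *
 * Returns the max number of subordinate bus discovered.
 */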
unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;
	struct pci_dev *dev;

	max = pci_scan_child_bus(bus);

	down_read(&pci_bus_sem);
	list_for_each_entry(dev, &bus->devices, bus_list)
		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
			if (dev->subordinate)
				pci_bus_size_bridges(dev->subordinate);
	up_read(&pci_bus_sem);

	pci_bus_assign_resources(bus);
	pci_enable_bridges(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);

EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
#endif


static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
{
	const struct pci_dev *a = to_pci_dev(d_a);
	const struct pci_dev *b = to_pci_dev(d_b);

	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;

	if      (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return  1;

	if      (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return  1;

	return 0;
}

void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}