// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/pci/pci-sysfs.c
 *
 * File attributes for PCI devices and buses: configuration space, BARs,
 * ROM contents, power state, rescan/remove controls and related sysfs
 * entries.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/export.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include "pci.h"

static int sysfs_initialized;

/* show configuration fields */
#define pci_config_attr(field, format_string)				\
static ssize_t								\
field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{									\
	struct pci_dev *pdev;						\
									\
	pdev = to_pci_dev(dev);						\
	return sysfs_emit(buf, format_string, pdev->field);		\
}									\
static DEVICE_ATTR_RO(field)

pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");
pci_config_attr(irq, "%u\n");
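
/*
 * "broken_parity_status" reports whether the device is known to generate
 * spurious parity errors; writing a boolean value overrides that flag.
 */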
static ssize_t broken_parity_status_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sysfs_emit(buf, "%u\n", pdev->broken_parity_status);
}

static ssize_t broken_parity_status_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->broken_parity_status = !!val;

	return count;
}
static DEVICE_ATTR_RW(broken_parity_status);
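
/*
 * Show the CPUs considered local to this device: the CPUs of its NUMA node
 * when NUMA is enabled (all online CPUs if the node is unknown), otherwise
 * the CPUs associated with the device's bus.
 */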
static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
				      struct device_attribute *attr, char *buf)
{
	const struct cpumask *mask;

#ifdef CONFIG_NUMA
	mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
					  cpumask_of_node(dev_to_node(dev));
#else
	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t local_cpus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, false, attr, buf);
}
static DEVICE_ATTR_RO(local_cpus);

static ssize_t local_cpulist_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, true, attr, buf);
}
static DEVICE_ATTR_RO(local_cpulist);

/*
 * PCI Bus Class Devices
 */
static ssize_t cpuaffinity_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(false, buf, cpumask);
}
static DEVICE_ATTR_RO(cpuaffinity);

static ssize_t cpulistaffinity_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask);
}
static DEVICE_ATTR_RO(cpulistaffinity);
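
/* Current PCI power state of the device (D0..D3cold) as a string */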
static ssize_t power_state_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state));
}
static DEVICE_ATTR_RO(power_state);

/* show resources */
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	int i;
	int max;
	resource_size_t start, end;
	size_t len = 0;

	if (pci_dev->subordinate)
		max = DEVICE_COUNT_RESOURCE;
	else
		max = PCI_BRIDGE_RESOURCES;

	for (i = 0; i < max; i++) {
		struct resource *res = &pci_dev->resource[i];
		pci_resource_to_user(pci_dev, i, res, &start, &end);
		len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
				     (unsigned long long)start,
				     (unsigned long long)end,
				     (unsigned long long)res->flags);
	}
	return len;
}
static DEVICE_ATTR_RO(resource);

static ssize_t max_link_speed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%s\n",
			  pci_speed_string(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);

static ssize_t max_link_width_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
}
static DEVICE_ATTR_RO(max_link_width);

static ssize_t current_link_speed_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;
	enum pci_bus_speed speed;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];

	return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
}
static DEVICE_ATTR_RO(current_link_speed);

static ssize_t current_link_width_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n",
		(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}
static DEVICE_ATTR_RO(current_link_width);

static ssize_t secondary_bus_number_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sec_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", sec_bus);
}
static DEVICE_ATTR_RO(secondary_bus_number);

static ssize_t subordinate_bus_number_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sub_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", sub_bus);
}
static DEVICE_ATTR_RO(subordinate_bus_number);

static ssize_t ari_enabled_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
}
static DEVICE_ATTR_RO(ari_enabled);
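
/* Modalias string used by userspace (udev/modprobe) to match drivers to this device */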
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
			  pci_dev->vendor, pci_dev->device,
			  pci_dev->subsystem_vendor, pci_dev->subsystem_device,
			  (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
			  (u8)(pci_dev->class));
}
static DEVICE_ATTR_RO(modalias);
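
/*
 * "enable" lets a privileged user enable or disable the device; the write
 * is rejected while a driver is bound to the device.
 */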
static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = kstrtoul(buf, 0, &val);

	if (result < 0)
		return result;

	/* this can crash the machine when done on the "wrong" device */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	device_lock(dev);
	if (dev->driver)
		result = -EBUSY;
	else if (val)
		result = pci_enable_device(pdev);
	else if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	else
		result = -EIO;
	device_unlock(dev);

	return result < 0 ? result : count;
}

static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
static DEVICE_ATTR_RW(enable);
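
/*
 * "numa_node" is writable so that a firmware-assigned node can be overridden
 * when the platform description is known to be wrong; doing so taints the
 * kernel with TAINT_FIRMWARE_WORKAROUND.
 */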
#ifdef CONFIG_NUMA
static ssize_t numa_node_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int node, ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = kstrtoint(buf, 0, &node);
	if (ret)
		return ret;

	if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
		return -EINVAL;

	if (node != NUMA_NO_NODE && !node_online(node))
		return -EINVAL;

	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	pci_alert(pdev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
		  node);

	dev->numa_node = node;
	return count;
}

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", dev->numa_node);
}
static DEVICE_ATTR_RW(numa_node);
#endif

static ssize_t dma_mask_bits_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask));
}
static DEVICE_ATTR_RO(dma_mask_bits);

static ssize_t consistent_dma_mask_bits_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
static DEVICE_ATTR_RO(consistent_dma_mask_bits);
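
/*
 * "msi_bus" reports and controls whether MSI/MSI-X is allowed for future
 * drivers of this device (or, for a bridge, of devices behind it).
 */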
static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;

	return sysfs_emit(buf, "%u\n", subordinate ?
			  !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			  : !pdev->no_msi);
}

static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * "no_msi" and "bus_flags" only affect what happens when a driver
	 * requests MSI or MSI-X.  They don't affect any drivers that have
	 * already requested MSI or MSI-X.
	 */
	if (!subordinate) {
		pdev->no_msi = !val;
		pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
			 val ? "allowed" : "disallowed");
		return count;
	}

	if (val)
		subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
	else
		subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;

	dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
		 val ? "allowed" : "disallowed");
	return count;
}
static DEVICE_ATTR_RW(msi_bus);
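
/* Writing a non-zero value to the bus type's "rescan" file rescans all PCI buses */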
static ssize_t rescan_store(struct bus_type *bus, const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *b = NULL;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		while ((b = pci_find_next_bus(b)) != NULL)
			pci_rescan_bus(b);
		pci_unlock_rescan_remove();
	}
	return count;
}
static BUS_ATTR_WO(rescan);

static struct attribute *pci_bus_attrs[] = {
	&bus_attr_rescan.attr,
	NULL,
};

static const struct attribute_group pci_bus_group = {
	.attrs = pci_bus_attrs,
};

const struct attribute_group *pci_bus_groups[] = {
	&pci_bus_group,
	NULL,
};
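
/* Per-device hotplug controls: rescan the device's bus or remove the device */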
static ssize_t dev_rescan_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	unsigned long val;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		pci_rescan_bus(pdev->bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
							    dev_rescan_store);

static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val && device_remove_file_self(dev, attr))
		pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
	return count;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL,
				  remove_store);

static ssize_t bus_rescan_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *bus = to_pci_bus(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
			pci_rescan_bus_bridge_resize(bus->self);
		else
			pci_rescan_bus(bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
							    bus_rescan_store);

#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->d3cold_allowed = !!val;
	if (pdev->d3cold_allowed)
		pci_d3cold_enable(pdev);
	else
		pci_d3cold_disable(pdev);

	pm_runtime_resume(dev);

	return count;
}

static ssize_t d3cold_allowed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed);
}
static DEVICE_ATTR_RW(d3cold_allowed);
#endif

#ifdef CONFIG_OF
static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct device_node *np = pci_device_to_OF_node(pdev);

	if (np == NULL)
		return 0;
	return sysfs_emit(buf, "%pOF\n", np);
}
static DEVICE_ATTR_RO(devspec);
#endif
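
/*
 * "driver_override" forces a specific driver name to match this device,
 * bypassing ID-table matching; writing an empty string clears the override.
 */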
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *pci_dev_attrs[] = {
	&dev_attr_power_state.attr,
	&dev_attr_resource.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_revision.attr,
	&dev_attr_class.attr,
	&dev_attr_irq.attr,
	&dev_attr_local_cpus.attr,
	&dev_attr_local_cpulist.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_dma_mask_bits.attr,
	&dev_attr_consistent_dma_mask_bits.attr,
	&dev_attr_enable.attr,
	&dev_attr_broken_parity_status.attr,
	&dev_attr_msi_bus.attr,
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
	&dev_attr_d3cold_allowed.attr,
#endif
#ifdef CONFIG_OF
	&dev_attr_devspec.attr,
#endif
	&dev_attr_driver_override.attr,
	&dev_attr_ari_enabled.attr,
	NULL,
};

static struct attribute *pci_bridge_attrs[] = {
	&dev_attr_subordinate_bus_number.attr,
	&dev_attr_secondary_bus_number.attr,
	NULL,
};

static struct attribute *pcie_dev_attrs[] = {
	&dev_attr_current_link_speed.attr,
	&dev_attr_current_link_width.attr,
	&dev_attr_max_link_width.attr,
	&dev_attr_max_link_speed.attr,
	NULL,
};

static struct attribute *pcibus_attrs[] = {
	&dev_attr_bus_rescan.attr,
	&dev_attr_cpuaffinity.attr,
	&dev_attr_cpulistaffinity.attr,
	NULL,
};

static const struct attribute_group pcibus_group = {
	.attrs = pcibus_attrs,
};

const struct attribute_group *pcibus_groups[] = {
	&pcibus_group,
	NULL,
};
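
/* "boot_vga" is 1 if this device is the VGA device used during boot */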
static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *vga_dev = vga_default_device();

	if (vga_dev)
		return sysfs_emit(buf, "%u\n", (pdev == vga_dev));

	return sysfs_emit(buf, "%u\n",
			  !!(pdev->resource[PCI_ROM_RESOURCE].flags &
			     IORESOURCE_ROM_SHADOW));
}
static DEVICE_ATTR_RO(boot_vga);
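
/*
 * Read from the "config" file: config space beyond the first 64 bytes
 * (128 for CardBus) is only exposed to privileged readers.
 */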
static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = 64;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	/* Several chips lock up trying to read undefined config space */
	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
		size = dev->cfg_size;
	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
		size = 128;

	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	} else {
		size = count;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val;
		pci_user_read_config_dword(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		data[off - init_off + 2] = (val >> 16) & 0xff;
		data[off - init_off + 3] = (val >> 24) & 0xff;
		off += 4;
		size -= 4;
		cond_resched();
	}

	if (size >= 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	if (size > 0) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		--size;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}
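
/* Write to the "config" file: raw writes into the device's config space */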
static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = count;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	if (off > dev->cfg_size)
		return 0;
	if (off + count > dev->cfg_size) {
		size = dev->cfg_size - off;
		count = size;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val = data[off - init_off];
		val |= (u32) data[off - init_off + 1] << 8;
		val |= (u32) data[off - init_off + 2] << 16;
		val |= (u32) data[off - init_off + 3] << 24;
		pci_user_write_config_dword(dev, off, val);
		off += 4;
		size -= 4;
	}

	if (size >= 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	if (size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		--size;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}
static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);

static struct bin_attribute *pci_dev_config_attrs[] = {
	&bin_attr_config,
	NULL,
};

static umode_t pci_dev_config_attr_is_visible(struct kobject *kobj,
					      struct bin_attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	a->size = PCI_CFG_SPACE_SIZE;
	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		a->size = PCI_CFG_SPACE_EXP_SIZE;

	return a->attr.mode;
}

static const struct attribute_group pci_dev_config_attr_group = {
	.bin_attrs = pci_dev_config_attrs,
	.is_bin_visible = pci_dev_config_attr_is_visible,
};

#ifdef HAVE_PCI_LEGACY
/**
 * pci_read_legacy_io - read byte(s) from legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer to store results
 * @off: offset into legacy I/O port space
 * @count: number of bytes to read
 *
 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
 * callback routine (pci_legacy_read).
 */
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr, char *buf,
				  loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_read(bus, off, (u32 *)buf, count);
}

/**
 * pci_write_legacy_io - write byte(s) to legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to write to
 * @bin_attr: struct bin_attribute for this file
 * @buf: data to write
 * @off: offset into legacy I/O port space
 * @count: number of bytes to write
 *
 * Writes 1, 2, or 4 bytes to legacy I/O port space using an arch specific
 * callback routine (pci_legacy_write).
 */
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr, char *buf,
				   loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_write(bus, off, *(u32 *)buf, count);
}

/**
 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_page_range, to mmap
 * legacy memory space (first meg of bus space) into application virtual
 * memory space.
 */
static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr,
			       struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
}

/**
 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_page_range, to mmap
 * legacy IO space (first meg of bus space) into application virtual
 * memory space.
 */
static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *attr,
			      struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
}

/**
 * pci_adjust_legacy_attr - adjustment of legacy file attributes
 * @b: bus to create files under
 * @mmap_type: I/O port or memory
 *
 * Stub implementation.  Can be overridden by arch if necessary.
 */
void __weak pci_adjust_legacy_attr(struct pci_bus *b,
				   enum pci_mmap_state mmap_type)
{
}

/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis.  This routine creates the files and ties them into
 * their associated read, write and mmap files from pci-sysfs.c
 *
 * On error unwind, but don't propagate the error to the caller
 * as it is ok to set up the PCI bus without these files.
 */
void pci_create_legacy_files(struct pci_bus *b)
{
	int error;

	if (!sysfs_initialized)
		return;

	b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
			       GFP_ATOMIC);
	if (!b->legacy_io)
		goto kzalloc_err;

	sysfs_bin_attr_init(b->legacy_io);
	b->legacy_io->attr.name = "legacy_io";
	b->legacy_io->size = 0xffff;
	b->legacy_io->attr.mode = 0600;
	b->legacy_io->read = pci_read_legacy_io;
	b->legacy_io->write = pci_write_legacy_io;
	b->legacy_io->mmap = pci_mmap_legacy_io;
	b->legacy_io->mapping = iomem_get_mapping();
	pci_adjust_legacy_attr(b, pci_mmap_io);
	error = device_create_bin_file(&b->dev, b->legacy_io);
	if (error)
		goto legacy_io_err;

	/* Allocated above after the legacy_io struct */
	b->legacy_mem = b->legacy_io + 1;
	sysfs_bin_attr_init(b->legacy_mem);
	b->legacy_mem->attr.name = "legacy_mem";
	b->legacy_mem->size = 1024*1024;
	b->legacy_mem->attr.mode = 0600;
	b->legacy_mem->mmap = pci_mmap_legacy_mem;
	b->legacy_mem->mapping = iomem_get_mapping();
	pci_adjust_legacy_attr(b, pci_mmap_mem);
	error = device_create_bin_file(&b->dev, b->legacy_mem);
	if (error)
		goto legacy_mem_err;

	return;

legacy_mem_err:
	device_remove_bin_file(&b->dev, b->legacy_io);
legacy_io_err:
	kfree(b->legacy_io);
	b->legacy_io = NULL;
kzalloc_err:
	dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
}

void pci_remove_legacy_files(struct pci_bus *b)
{
	if (b->legacy_io) {
		device_remove_bin_file(&b->dev, b->legacy_io);
		device_remove_bin_file(&b->dev, b->legacy_mem);
		kfree(b->legacy_io);
	}
}
#endif /* HAVE_PCI_LEGACY */

#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)

int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
		  enum pci_mmap_api mmap_api)
{
	unsigned long nr, start, size;
	resource_size_t pci_start = 0, pci_end;

	if (pci_resource_len(pdev, resno) == 0)
		return 0;
	nr = vma_pages(vma);
	start = vma->vm_pgoff;
	size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
	if (mmap_api == PCI_MMAP_PROCFS) {
		pci_resource_to_user(pdev, resno, &pdev->resource[resno],
				     &pci_start, &pci_end);
		pci_start >>= PAGE_SHIFT;
	}
	if (start >= pci_start && start < pci_start + size &&
	    start + nr <= pci_start + size)
		return 1;
	return 0;
}

/**
 * pci_mmap_resource - map a PCI resource into user memory space
 * @kobj: kobject for mapping
 * @attr: struct bin_attribute for the file being mapped
 * @vma: struct vm_area_struct passed into the mmap
 * @write_combine: 1 for write_combine mapping
 *
 * Use the regular PCI mapping routines to map a PCI resource into userspace.
 */
static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
			     struct vm_area_struct *vma, int write_combine)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	enum pci_mmap_state mmap_type;
	struct resource *res = &pdev->resource[bar];
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
		return -EINVAL;

	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
		return -EINVAL;

	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

	return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}

static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 0);
}

static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 1);
}
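
/* Read or write an I/O port BAR ("resourceN" file) with a 1, 2 or 4 byte access */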
static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr, char *buf,
			       loff_t off, size_t count, bool write)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	unsigned long port = off;

	port += pci_resource_start(pdev, bar);

	if (port > pci_resource_end(pdev, bar))
		return 0;

	if (port + count - 1 > pci_resource_end(pdev, bar))
		return -EINVAL;

	switch (count) {
	case 1:
		if (write)
			outb(*(u8 *)buf, port);
		else
			*(u8 *)buf = inb(port);
		return 1;
	case 2:
		if (write)
			outw(*(u16 *)buf, port);
		else
			*(u16 *)buf = inw(port);
		return 2;
	case 4:
		if (write)
			outl(*(u32 *)buf, port);
		else
			*(u32 *)buf = inl(port);
		return 4;
	}
	return -EINVAL;
}

static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}

static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t off, size_t count)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	return pci_resource_io(filp, kobj, attr, buf, off, count, true);
}

/**
 * pci_remove_resource_files - cleanup resource files
 * @pdev: dev to cleanup
 *
 * If we created resource files for @pdev, remove them from sysfs and
 * free their resources.
 */
static void pci_remove_resource_files(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct bin_attribute *res_attr;

		res_attr = pdev->res_attr[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}

		res_attr = pdev->res_attr_wc[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}
	}
}

static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
{
	/* allocate attribute structure, piggyback attribute name */
	int name_len = write_combine ? 13 : 10;
	struct bin_attribute *res_attr;
	char *res_attr_name;
	int retval;

	res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
	if (!res_attr)
		return -ENOMEM;

	res_attr_name = (char *)(res_attr + 1);

	sysfs_bin_attr_init(res_attr);
	if (write_combine) {
		pdev->res_attr_wc[num] = res_attr;
		sprintf(res_attr_name, "resource%d_wc", num);
		res_attr->mmap = pci_mmap_resource_wc;
	} else {
		pdev->res_attr[num] = res_attr;
		sprintf(res_attr_name, "resource%d", num);
		if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
			res_attr->read = pci_read_resource_io;
			res_attr->write = pci_write_resource_io;
			if (arch_can_pci_mmap_io())
				res_attr->mmap = pci_mmap_resource_uc;
		} else {
			res_attr->mmap = pci_mmap_resource_uc;
		}
	}
	if (res_attr->mmap)
		res_attr->mapping = iomem_get_mapping();
	res_attr->attr.name = res_attr_name;
	res_attr->attr.mode = 0600;
	res_attr->size = pci_resource_len(pdev, num);
	res_attr->private = (void *)(unsigned long)num;
	retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
	if (retval)
		kfree(res_attr);

	return retval;
}

/**
 * pci_create_resource_files - create resource files in sysfs for @pdev
 * @pdev: dev in question
 *
 * Walk the resources in @pdev creating files for each resource available.
 */
static int pci_create_resource_files(struct pci_dev *pdev)
{
	int i;
	int retval;

	/* Expose the PCI resources from this device as files */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {

		/* skip empty resources */
		if (!pci_resource_len(pdev, i))
			continue;

		retval = pci_create_attr(pdev, i, 0);
		/* for prefetchable resources, create a write-combined file */
		if (!retval && arch_can_pci_mmap_wc() &&
		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
			retval = pci_create_attr(pdev, i, 1);
		if (retval) {
			pci_remove_resource_files(pdev);
			return retval;
		}
	}
	return 0;
}
#else
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
#endif

/**
 * pci_write_rom - used to enable access to the PCI ROM display
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: user input
 * @off: file offset
 * @count: number of bytes in input
 *
 * writing anything except 0 enables it
 */
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr, char *buf,
			     loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if ((off == 0) && (*buf == '0') && (count == 2))
		pdev->rom_attr_enabled = 0;
	else
		pdev->rom_attr_enabled = 1;

	return count;
}

/**
 * pci_read_rom - read a PCI ROM
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: where to put the data we read from the ROM
 * @off: file offset
 * @count: number of bytes to read
 *
 * Put @count bytes starting at @off into @buf from the ROM in the PCI
 * device corresponding to @kobj.
 */
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	void __iomem *rom;
	size_t size;

	if (!pdev->rom_attr_enabled)
		return -EINVAL;

	rom = pci_map_rom(pdev, &size);
	if (!rom || !size)
		return -EIO;

	if (off >= size)
		count = 0;
	else {
		if (off + count > size)
			count = size - off;

		memcpy_fromio(buf, rom + off, count);
	}
	pci_unmap_rom(pdev, rom);

	return count;
}
static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);

static struct bin_attribute *pci_dev_rom_attrs[] = {
	&bin_attr_rom,
	NULL,
};

static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
					   struct bin_attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	size_t rom_size;

	/* If the device has a ROM, try to expose it in sysfs. */
	rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
	if (!rom_size)
		return 0;

	a->size = rom_size;

	return a->attr.mode;
}

static const struct attribute_group pci_dev_rom_attr_group = {
	.bin_attrs = pci_dev_rom_attrs,
	.is_bin_visible = pci_dev_rom_attr_is_visible,
};
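
/* Writing 1 to the "reset" file triggers a function reset of the device */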
static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = kstrtoul(buf, 0, &val);

	if (result < 0)
		return result;

	if (val != 1)
		return -EINVAL;

	pm_runtime_get_sync(dev);
	result = pci_reset_function(pdev);
	pm_runtime_put(dev);
	if (result < 0)
		return result;

	return count;
}
static DEVICE_ATTR_WO(reset);

static struct attribute *pci_dev_reset_attrs[] = {
	&dev_attr_reset.attr,
	NULL,
};

static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
					     struct attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if (!pdev->reset_fn)
		return 0;

	return a->mode;
}

static const struct attribute_group pci_dev_reset_attr_group = {
	.attrs = pci_dev_reset_attrs,
	.is_visible = pci_dev_reset_attr_is_visible,
};

int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return -EACCES;

	return pci_create_resource_files(pdev);
}

/**
 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
 * @pdev: device whose entries we should free
 *
 * Cleanup when @pdev is removed from sysfs.
 */
void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return;

	pci_remove_resource_files(pdev);
}
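
/*
 * Late initcall: create resource and legacy files for devices and buses
 * discovered before sysfs was ready; devices added later get their files
 * when they are added to the bus.
 */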
static int __init pci_sysfs_init(void)
{
	struct pci_dev *pdev = NULL;
	struct pci_bus *pbus = NULL;
	int retval;

	sysfs_initialized = 1;
	for_each_pci_dev(pdev) {
		retval = pci_create_sysfs_dev_files(pdev);
		if (retval) {
			pci_dev_put(pdev);
			return retval;
		}
	}

	while ((pbus = pci_find_next_bus(pbus)))
		pci_create_legacy_files(pbus);

	return 0;
}
late_initcall(pci_sysfs_init);

static struct attribute *pci_dev_dev_attrs[] = {
	&dev_attr_boot_vga.attr,
	NULL,
};

static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (a == &dev_attr_boot_vga.attr)
		if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
			return 0;

	return a->mode;
}

static struct attribute *pci_dev_hp_attrs[] = {
	&dev_attr_remove.attr,
	&dev_attr_dev_rescan.attr,
	NULL,
};

static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pdev->is_virtfn)
		return 0;

	return a->mode;
}

static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_bridge(pdev))
		return a->mode;

	return 0;
}

static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_pcie(pdev))
		return a->mode;

	return 0;
}

static const struct attribute_group pci_dev_group = {
	.attrs = pci_dev_attrs,
};

const struct attribute_group *pci_dev_groups[] = {
	&pci_dev_group,
	&pci_dev_config_attr_group,
	&pci_dev_rom_attr_group,
	&pci_dev_reset_attr_group,
	&pci_dev_vpd_attr_group,
#ifdef CONFIG_DMI
	&pci_dev_smbios_attr_group,
#endif
#ifdef CONFIG_ACPI
	&pci_dev_acpi_attr_group,
#endif
	NULL,
};

static const struct attribute_group pci_dev_hp_attr_group = {
	.attrs = pci_dev_hp_attrs,
	.is_visible = pci_dev_hp_attrs_are_visible,
};

static const struct attribute_group pci_dev_attr_group = {
	.attrs = pci_dev_dev_attrs,
	.is_visible = pci_dev_attrs_are_visible,
};

static const struct attribute_group pci_bridge_attr_group = {
	.attrs = pci_bridge_attrs,
	.is_visible = pci_bridge_attrs_are_visible,
};

static const struct attribute_group pcie_dev_attr_group = {
	.attrs = pcie_dev_attrs,
	.is_visible = pcie_dev_attrs_are_visible,
};

static const struct attribute_group *pci_dev_attr_groups[] = {
	&pci_dev_attr_group,
	&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
	&sriov_pf_dev_attr_group,
	&sriov_vf_dev_attr_group,
#endif
	&pci_bridge_attr_group,
	&pcie_dev_attr_group,
#ifdef CONFIG_PCIEAER
	&aer_stats_attr_group,
#endif
#ifdef CONFIG_PCIEASPM
	&aspm_ctrl_attr_group,
#endif
	NULL,
};

const struct device_type pci_dev_type = {
	.groups = pci_dev_attr_groups,
};