// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Peer 2 Peer DMA support.
 *
 * Copyright (c) 2016-2018, Logan Gunthorpe
 * Copyright (c) 2016-2017, Microsemi Corporation
 * Copyright (c) 2017, Christoph Hellwig
 * Copyright (c) 2018, Eideticom Inc.
 */

#define pr_fmt(fmt) "pci-p2pdma: " fmt

#include <linux/ctype.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>
#include <linux/random.h>
#include <linux/seq_buf.h>
#include <linux/xarray.h>

enum pci_p2pdma_map_type {
	PCI_P2PDMA_MAP_UNKNOWN = 0,
	PCI_P2PDMA_MAP_NOT_SUPPORTED,
	PCI_P2PDMA_MAP_BUS_ADDR,
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma {
	struct gen_pool *pool;
	bool p2pmem_published;
	struct xarray map_types;
};

struct pci_p2pdma_pagemap {
	struct dev_pagemap pgmap;
	struct pci_dev *provider;
	u64 bus_offset;
};

static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
{
	return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	size_t size = 0;

	if (pdev->p2pdma->pool)
		size = gen_pool_size(pdev->p2pdma->pool);

	return scnprintf(buf, PAGE_SIZE, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t available_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	size_t avail = 0;

	if (pdev->p2pdma->pool)
		avail = gen_pool_avail(pdev->p2pdma->pool);

	return scnprintf(buf, PAGE_SIZE, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);

static ssize_t published_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 pdev->p2pdma->p2pmem_published);
}
static DEVICE_ATTR_RO(published);

static struct attribute *p2pmem_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_available.attr,
	&dev_attr_published.attr,
	NULL,
};

static const struct attribute_group p2pmem_group = {
	.attrs = p2pmem_attrs,
	.name = "p2pmem",
};

static void pci_p2pdma_release(void *data)
{
	struct pci_dev *pdev = data;
	struct pci_p2pdma *p2pdma = pdev->p2pdma;

	if (!p2pdma)
		return;

	/* Flush and disable pci_alloc_p2pmem() */
	pdev->p2pdma = NULL;
	synchronize_rcu();

	gen_pool_destroy(p2pdma->pool);
	sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
	xa_destroy(&p2pdma->map_types);
}

static int pci_p2pdma_setup(struct pci_dev *pdev)
{
	int error = -ENOMEM;
	struct pci_p2pdma *p2p;

	p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
	if (!p2p)
		return -ENOMEM;

	xa_init(&p2p->map_types);

	p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
	if (!p2p->pool)
		goto out;

	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
	if (error)
		goto out_pool_destroy;

	pdev->p2pdma = p2p;

	error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
	if (error)
		goto out_pool_destroy;

	return 0;

out_pool_destroy:
	pdev->p2pdma = NULL;
	gen_pool_destroy(p2p->pool);
out:
	devm_kfree(&pdev->dev, p2p);
	return error;
}

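/**
 * pci_p2pdma_add_resource - add memory for use as p2p memory
 * @pdev: the device to add the memory to
 * @bar: PCI BAR to add
 * @size: size of the memory to add, may be zero to use the whole BAR
 * @offset: offset into the PCI BAR
 *
 * The memory will be given ZONE_DEVICE struct pages so that it may
 * be used with any DMA request.
 *
 * A minimal usage sketch, as it might appear in a driver's probe routine
 * (the BAR number and size here are illustrative only):
 *
 *	if (pci_p2pdma_add_resource(pdev, 4, pci_resource_len(pdev, 4), 0))
 *		dev_warn(&pdev->dev, "no p2p memory registered\n");
 *	else
 *		pci_p2pmem_publish(pdev, true);
 */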
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
			    u64 offset)
{
	struct pci_p2pdma_pagemap *p2p_pgmap;
	struct dev_pagemap *pgmap;
	void *addr;
	int error;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -EINVAL;

	if (offset >= pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!size)
		size = pci_resource_len(pdev, bar) - offset;

	if (size + offset > pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!pdev->p2pdma) {
		error = pci_p2pdma_setup(pdev);
		if (error)
			return error;
	}

	p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
	if (!p2p_pgmap)
		return -ENOMEM;

	pgmap = &p2p_pgmap->pgmap;
	pgmap->range.start = pci_resource_start(pdev, bar) + offset;
	pgmap->range.end = pgmap->range.start + size - 1;
	pgmap->nr_range = 1;
	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;

	p2p_pgmap->provider = pdev;
	p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) -
		pci_resource_start(pdev, bar);

	addr = devm_memremap_pages(&pdev->dev, pgmap);
	if (IS_ERR(addr)) {
		error = PTR_ERR(addr);
		goto pgmap_free;
	}

	error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
			pci_bus_address(pdev, bar) + offset,
			range_len(&pgmap->range), dev_to_node(&pdev->dev),
			pgmap->ref);
	if (error)
		goto pages_free;

	pci_info(pdev, "added peer-to-peer DMA memory %#llx-%#llx\n",
		 pgmap->range.start, pgmap->range.end);

	return 0;

pages_free:
	devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
	devm_kfree(&pdev->dev, pgmap);
	return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);

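/*
 * Note this function returns the parent PCI device with a
 * reference taken. It is the caller's responsibility to drop
 * the reference.
 */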
static struct pci_dev *find_parent_pci_dev(struct device *dev)
{
	struct device *parent;

	dev = get_device(dev);

	while (dev) {
		if (dev_is_pci(dev))
			return to_pci_dev(dev);

		parent = get_device(dev->parent);
		put_device(dev);
		dev = parent;
	}

	return NULL;
}

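/*
 * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
 * TLPs upstream via ACS. Returns 1 if the packets will be redirected
 * upstream, 0 otherwise.
 */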
static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

	pos = pdev->acs_cap;
	if (!pos)
		return 0;

	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);

	if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
		return 1;

	return 0;
}

static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
{
	if (!buf)
		return;

	seq_buf_printf(buf, "%s;", pci_name(pdev));
}

static bool cpu_supports_p2pdma(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* Any AMD CPU whose family ID is Zen or newer supports p2pdma */
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17)
		return true;
#endif

	return false;
}

static const struct pci_p2pdma_whitelist_entry {
	unsigned short vendor;
	unsigned short device;
	enum {
		REQ_SAME_HOST_BRIDGE	= 1 << 0,
	} flags;
} pci_p2pdma_whitelist[] = {
	/* Intel Xeon E5/Core i7 */
	{PCI_VENDOR_ID_INTEL,	0x3c00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL,	0x3c01, REQ_SAME_HOST_BRIDGE},
	/* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */
	{PCI_VENDOR_ID_INTEL,	0x2f00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL,	0x2f01, REQ_SAME_HOST_BRIDGE},
	/* Intel SkyLake E */
	{PCI_VENDOR_ID_INTEL,	0x2030, 0},
	{PCI_VENDOR_ID_INTEL,	0x2031, 0},
	{PCI_VENDOR_ID_INTEL,	0x2032, 0},
	{PCI_VENDOR_ID_INTEL,	0x2033, 0},
	{PCI_VENDOR_ID_INTEL,	0x2020, 0},
	{}
};

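/*
 * This lookup function tries to find the PCI device corresponding to a given
 * host bridge.
 *
 * It assumes the host bridge device is the first PCI device in the
 * bus->devices list and that the devfn of that device is always 00.0. If
 * either assumption does not hold, NULL is returned and the whitelist
 * check simply fails.
 *
 * For this to be safe, the caller should hold a reference to a device on the
 * bridge, which should ensure the host_bridge device will not be freed
 * or removed from the head of the devices list.
 */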
static struct pci_dev *pci_host_bridge_dev(struct pci_host_bridge *host)
{
	struct pci_dev *root;

	root = list_first_entry_or_null(&host->bus->devices,
					struct pci_dev, bus_list);

	if (!root)
		return NULL;
	if (root->devfn != PCI_DEVFN(0, 0))
		return NULL;

	return root;
}

static bool __host_bridge_whitelist(struct pci_host_bridge *host,
				    bool same_host_bridge)
{
	struct pci_dev *root = pci_host_bridge_dev(host);
	const struct pci_p2pdma_whitelist_entry *entry;
	unsigned short vendor, device;

	if (!root)
		return false;

	vendor = root->vendor;
	device = root->device;

	for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) {
		if (vendor != entry->vendor || device != entry->device)
			continue;
		if (entry->flags & REQ_SAME_HOST_BRIDGE && !same_host_bridge)
			return false;

		return true;
	}

	return false;
}

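/*
 * If we can't find a common upstream bridge, take a look at the root
 * complex and compare it to a whitelist of known good hardware. This
 * avoids the need to collect a whitelist of all possible root ports.
 */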
static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b)
{
	struct pci_host_bridge *host_a = pci_find_host_bridge(a->bus);
	struct pci_host_bridge *host_b = pci_find_host_bridge(b->bus);

	if (host_a == host_b)
		return __host_bridge_whitelist(host_a, true);

	if (__host_bridge_whitelist(host_a, false) &&
	    __host_bridge_whitelist(host_b, false))
		return true;

	return false;
}

static enum pci_p2pdma_map_type
__upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client,
		int *dist, bool *acs_redirects, struct seq_buf *acs_list)
{
	struct pci_dev *a = provider, *b = client, *bb;
	int dist_a = 0;
	int dist_b = 0;
	int acs_cnt = 0;

	if (acs_redirects)
		*acs_redirects = false;

	/*
	 * Note, we don't need to take references to devices returned by
	 * pci_upstream_bridge() seeing we hold a reference to a child
	 * device which will already hold a reference to the upstream bridge.
	 */
	while (a) {
		dist_b = 0;

		if (pci_bridge_has_acs_redir(a)) {
			seq_buf_print_bus_devfn(acs_list, a);
			acs_cnt++;
		}

		bb = b;

		while (bb) {
			if (a == bb)
				goto check_b_path_acs;

			bb = pci_upstream_bridge(bb);
			dist_b++;
		}

		a = pci_upstream_bridge(a);
		dist_a++;
	}

	if (dist)
		*dist = dist_a + dist_b;

	return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;

check_b_path_acs:
	bb = b;

	while (bb) {
		if (a == bb)
			break;

		if (pci_bridge_has_acs_redir(bb)) {
			seq_buf_print_bus_devfn(acs_list, bb);
			acs_cnt++;
		}

		bb = pci_upstream_bridge(bb);
	}

	if (dist)
		*dist = dist_a + dist_b;

	if (acs_cnt) {
		if (acs_redirects)
			*acs_redirects = true;

		return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
	}

	return PCI_P2PDMA_MAP_BUS_ADDR;
}

static unsigned long map_types_idx(struct pci_dev *client)
{
	return (pci_domain_nr(client->bus) << 16) |
		(client->bus->number << 8) | client->devfn;
}

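/*
 * Find the distance through the nearest common upstream bridge between
 * two PCI devices.
 *
 * If the two devices are the same device then 0 will be returned.
 *
 * If there are two virtual functions of the same device behind the same
 * bridge port then 2 will be returned (one step down to the PCIe switch,
 * then one step back to the same device).
 *
 * In the case where two devices are connected to the same PCIe switch, the
 * value 4 will be returned. This corresponds to the following PCI tree:
 *
 *     -+  Root Port
 *      \+ Switch Upstream Port
 *       +-+ Switch Downstream Port
 *       + \- Device A
 *       \-+ Switch Downstream Port
 *         \- Device B
 *
 * The distance is 4 because we traverse from Device A through the downstream
 * port of the switch, to the common upstream port, back up to the second
 * downstream port and then to Device B.
 *
 * Any two devices that cannot communicate using p2pdma will return
 * PCI_P2PDMA_MAP_NOT_SUPPORTED.
 *
 * Any two devices that have a data path that goes through the host bridge
 * will consult a whitelist. If the host bridge is on the whitelist (or the
 * CPU is known to support P2P through its root complex), the transfer is
 * allowed and mapped through the host bridge; otherwise
 * PCI_P2PDMA_MAP_NOT_SUPPORTED is returned.
 *
 * The resulting mapping type is cached in the provider's map_types xarray,
 * indexed by the client's bus/devfn, for later use by
 * pci_p2pdma_map_sg_attrs().
 */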
static enum pci_p2pdma_map_type
upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client,
		int *dist, bool *acs_redirects, struct seq_buf *acs_list)
{
	enum pci_p2pdma_map_type map_type;

	map_type = __upstream_bridge_distance(provider, client, dist,
					      acs_redirects, acs_list);

	if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE) {
		if (!cpu_supports_p2pdma() &&
		    !host_bridge_whitelist(provider, client))
			map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
	}

	if (provider->p2pdma)
		xa_store(&provider->p2pdma->map_types, map_types_idx(client),
			 xa_mk_value(map_type), GFP_KERNEL);

	return map_type;
}

static enum pci_p2pdma_map_type
upstream_bridge_distance_warn(struct pci_dev *provider, struct pci_dev *client,
			      int *dist)
{
	struct seq_buf acs_list;
	bool acs_redirects;
	int ret;

	seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!acs_list.buffer)
		return -ENOMEM;

	ret = upstream_bridge_distance(provider, client, dist, &acs_redirects,
				       &acs_list);
	if (acs_redirects) {
		pci_warn(client, "ACS redirect is set between the client and provider (%s)\n",
			 pci_name(provider));

		/* Drop final semicolon before printing the ACS list */
		acs_list.buffer[acs_list.len - 1] = 0;
		pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
			 acs_list.buffer);
	}

	if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED) {
		pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n",
			 pci_name(provider));
	}

	kfree(acs_list.buffer);

	return ret;
}

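/**
 * pci_p2pdma_distance_many - Determine the cumulative distance between
 *	a p2pdma provider and the clients in use.
 * @provider: p2pdma provider to check against the client list
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of clients in the array
 * @verbose: if true, print warnings for devices when we return -1
 *
 * Returns -1 if any of the clients are not compatible, otherwise returns a
 * positive number where a lower number is the preferable choice. (If there's
 * one client that's the same device as the provider it will return 0, which
 * is best.)
 *
 * "compatible" means the provider and the clients are either all behind
 * the same PCI root port or the host bridges connected to each of the devices
 * are listed in the 'pci_p2pdma_whitelist'.
 */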
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
			     int num_clients, bool verbose)
{
	bool not_supported = false;
	struct pci_dev *pci_client;
	int total_dist = 0;
	int distance;
	int i, ret;

	if (num_clients == 0)
		return -1;

	for (i = 0; i < num_clients; i++) {
		pci_client = find_parent_pci_dev(clients[i]);
		if (!pci_client) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
			return -1;
		}

		if (verbose)
			ret = upstream_bridge_distance_warn(provider,
					pci_client, &distance);
		else
			ret = upstream_bridge_distance(provider, pci_client,
						       &distance, NULL, NULL);

		pci_dev_put(pci_client);

		if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED)
			not_supported = true;

		if (not_supported && !verbose)
			break;

		total_dist += distance;
	}

	if (not_supported)
		return -1;

	return total_dist;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);

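/**
 * pci_has_p2pmem - check if a given PCI device has published any p2pmem
 * @pdev: PCI device to check
 */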
bool pci_has_p2pmem(struct pci_dev *pdev)
{
	return pdev->p2pdma && pdev->p2pdma->p2pmem_published;
}
EXPORT_SYMBOL_GPL(pci_has_p2pmem);

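/**
 * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with
 *	the specified list of clients and shortest distance
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of client devices in the list
 *
 * If multiple devices are behind the same switch, the one "closest" to the
 * client devices in use will be chosen first. (So if one of the providers is
 * the same as one of the clients, that provider will be used ahead of any
 * other providers that are unrelated.) If multiple providers are an equal
 * distance away, one will be chosen at random.
 *
 * Returns a pointer to the PCI device with a reference taken (use pci_dev_put
 * to return the reference) or NULL if no compatible device is found.
 */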
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
{
	struct pci_dev *pdev = NULL;
	int distance;
	int closest_distance = INT_MAX;
	struct pci_dev **closest_pdevs;
	int dev_cnt = 0;
	const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
	int i;

	closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!closest_pdevs)
		return NULL;

	while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
		if (!pci_has_p2pmem(pdev))
			continue;

		distance = pci_p2pdma_distance_many(pdev, clients,
						    num_clients, false);
		if (distance < 0 || distance > closest_distance)
			continue;

		if (distance == closest_distance && dev_cnt >= max_devs)
			continue;

		if (distance < closest_distance) {
			for (i = 0; i < dev_cnt; i++)
				pci_dev_put(closest_pdevs[i]);

			dev_cnt = 0;
			closest_distance = distance;
		}

		closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
	}

	if (dev_cnt)
		pdev = pci_dev_get(closest_pdevs[prandom_u32_max(dev_cnt)]);

	for (i = 0; i < dev_cnt; i++)
		pci_dev_put(closest_pdevs[i]);

	kfree(closest_pdevs);
	return pdev;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);

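/**
 * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
 * @pdev: the device to allocate memory from
 * @size: number of bytes to allocate
 *
 * Returns the allocated memory or NULL on error.
 *
 * A minimal usage sketch (the provider and length are whatever the calling
 * driver has at hand):
 *
 *	void *buf = pci_alloc_p2pmem(provider, len);
 *
 *	if (buf) {
 *		... set up DMA to/from buf ...
 *		pci_free_p2pmem(provider, buf, len);
 *	}
 */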
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
	void *ret = NULL;
	struct percpu_ref *ref;

	/*
	 * Pairs with synchronize_rcu() in pci_p2pdma_release() to
	 * ensure pdev->p2pdma is non-NULL for the duration of the
	 * read-lock.
	 */
	rcu_read_lock();
	if (unlikely(!pdev->p2pdma))
		goto out;

	ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size,
					   (void **) &ref);
	if (!ret)
		goto out;

	if (unlikely(!percpu_ref_tryget_live(ref))) {
		gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size);
		ret = NULL;
		goto out;
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);

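/**
 * pci_free_p2pmem - free peer-to-peer DMA memory
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 * @size: number of bytes that were allocated
 */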
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
	struct percpu_ref *ref;

	gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size,
			    (void **) &ref);
	percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(pci_free_p2pmem);

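/**
 * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
 *	address obtained with pci_alloc_p2pmem()
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 */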
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
{
	if (!addr)
		return 0;
	if (!pdev->p2pdma)
		return 0;

	/*
	 * Note: when we added the memory to the pool we used the PCI
	 * bus address as the physical address. So gen_pool_virt_to_phys()
	 * actually returns the bus address despite the misleading name.
	 */
	return gen_pool_virt_to_phys(pdev->p2pdma->pool, (unsigned long)addr);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);

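/**
 * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
 * @pdev: the device to allocate memory from
 * @nents: the number of SG entries in the list
 * @length: number of bytes to allocate
 *
 * Return: %NULL on error or &struct scatterlist pointer and @nents on success
 */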
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length)
{
	struct scatterlist *sg;
	void *addr;

	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, 1);

	addr = pci_alloc_p2pmem(pdev, length);
	if (!addr)
		goto out_free_sg;

	sg_set_buf(sg, addr, length);
	*nents = 1;
	return sg;

out_free_sg:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);

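/**
 * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl()
 * @pdev: the device the memory was allocated from
 * @sgl: the allocated scatterlist
 */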
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, INT_MAX, count) {
		if (!sg)
			break;

		pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
	}
	kfree(sgl);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);

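/**
 * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
 *	other devices with pci_p2pmem_find_many()
 * @pdev: the device with peer-to-peer DMA memory to publish
 * @publish: set to true to publish the memory, false to unpublish it
 *
 * Published memory can be used by other PCI device drivers for
 * peer-2-peer DMA operations. Non-published memory is reserved for
 * exclusive use of the device driver that registers the peer-to-peer
 * memory.
 */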
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
	if (pdev->p2pdma)
		pdev->p2pdma->p2pmem_published = publish;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);

static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct pci_dev *provider,
						    struct pci_dev *client)
{
	if (!provider->p2pdma)
		return PCI_P2PDMA_MAP_NOT_SUPPORTED;

	return xa_to_value(xa_load(&provider->p2pdma->map_types,
				   map_types_idx(client)));
}

static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
		struct device *dev, struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	/*
	 * bus_offset is the BAR's bus address minus its CPU physical
	 * address (see pci_p2pdma_add_resource()), so the bus address of
	 * a P2P page is its physical address plus bus_offset.
	 */
	for_each_sg(sg, s, nents, i) {
		s->dma_address = sg_phys(s) + p2p_pgmap->bus_offset;
		sg_dma_len(s) = s->length;
	}

	return nents;
}

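/**
 * pci_p2pdma_map_sg_attrs - map a PCI peer-to-peer scatterlist for DMA
 * @dev: device doing the DMA request
 * @sg: scatter list to map
 * @nents: elements in the scatterlist
 * @dir: DMA direction
 * @attrs: DMA attributes passed to dma_map_sg() (if called)
 *
 * Scatterlists mapped with this function should be unmapped using
 * pci_p2pdma_unmap_sg_attrs().
 *
 * Returns the number of SG entries mapped or 0 on error.
 */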
int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_pagemap *p2p_pgmap =
		to_p2p_pgmap(sg_page(sg)->pgmap);
	struct pci_dev *client;

	if (WARN_ON_ONCE(!dev_is_pci(dev)))
		return 0;

	client = to_pci_dev(dev);

	switch (pci_p2pdma_map_type(p2p_pgmap->provider, client)) {
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
		return dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	case PCI_P2PDMA_MAP_BUS_ADDR:
		return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);

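/**
 * pci_p2pdma_unmap_sg_attrs - unmap a PCI peer-to-peer scatterlist that was
 *	mapped with pci_p2pdma_map_sg_attrs()
 * @dev: device doing the DMA request
 * @sg: scatter list to unmap
 * @nents: number of elements returned by pci_p2pdma_map_sg_attrs()
 * @dir: DMA direction
 * @attrs: DMA attributes passed to dma_unmap_sg() (if called)
 */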
void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_pagemap *p2p_pgmap =
		to_p2p_pgmap(sg_page(sg)->pgmap);
	enum pci_p2pdma_map_type map_type;
	struct pci_dev *client;

	if (WARN_ON_ONCE(!dev_is_pci(dev)))
		return;

	client = to_pci_dev(dev);

	map_type = pci_p2pdma_map_type(p2p_pgmap->provider, client);

	if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
		dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs);

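/**
 * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
 *	to enable p2pdma
 * @page: contents of the value to be stored
 * @p2p_dev: returns the PCI device that was selected to be used
 *	(if one was specified in the stored value)
 * @use_p2pdma: returns whether to enable p2pdma or not
 *
 * Parses an attribute value to decide whether to enable p2pdma.
 * The value can select a PCI device (using its full BDF device
 * name) or a boolean (in any format strtobool() accepts). A false
 * value disables p2pdma, a true value expects the caller
 * to automatically find a compatible device and specifying a PCI device
 * expects the caller to use the specific provider.
 *
 * pci_p2pdma_enable_show() should be used as the show operation for
 * the attribute.
 *
 * Returns 0 on success
 */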
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
			    bool *use_p2pdma)
{
	struct device *dev;

	dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
	if (dev) {
		*use_p2pdma = true;
		*p2p_dev = to_pci_dev(dev);

		if (!pci_has_p2pmem(*p2p_dev)) {
			pci_err(*p2p_dev,
				"PCI device has no peer-to-peer memory: %s\n",
				page);
			pci_dev_put(*p2p_dev);
			return -ENODEV;
		}

		return 0;
	} else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
		/*
		 * If the user enters a PCI device that doesn't exist
		 * like "0000:01:00.1", we don't want strtobool to think
		 * it's a '0' when it's clearly not what the user wanted.
		 * So we require 0's and 1's to be exactly one character.
		 */
	} else if (!strtobool(page, use_p2pdma)) {
		return 0;
	}

	pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);

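/**
 * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
 *	whether p2pdma is enabled
 * @page: contents of the stored value
 * @p2p_dev: the selected p2p device (NULL if no device is selected)
 * @use_p2pdma: whether p2pdma has been enabled
 *
 * Attributes that use pci_p2pdma_enable_store() should use this function
 * to show the value of the attribute.
 *
 * Returns 0 on success
 */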
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
			       bool use_p2pdma)
{
	if (!use_p2pdma)
		return sprintf(page, "0\n");

	if (!p2p_dev)
		return sprintf(page, "1\n");

	return sprintf(page, "%s\n", pci_name(p2p_dev));
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);