// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Peer 2 Peer DMA support.
 */

#define pr_fmt(fmt) "pci-p2pdma: " fmt

#include <linux/ctype.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>
#include <linux/random.h>
#include <linux/seq_buf.h>
#include <linux/xarray.h>

enum pci_p2pdma_map_type {
	PCI_P2PDMA_MAP_UNKNOWN = 0,
	PCI_P2PDMA_MAP_NOT_SUPPORTED,
	PCI_P2PDMA_MAP_BUS_ADDR,
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma {
	struct gen_pool *pool;
	bool p2pmem_published;
	struct xarray map_types;
};

struct pci_p2pdma_pagemap {
	struct dev_pagemap pgmap;
	struct pci_dev *provider;
	u64 bus_offset;
};

static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
{
	return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	size_t size = 0;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma && p2pdma->pool)
		size = gen_pool_size(p2pdma->pool);
	rcu_read_unlock();

	return sysfs_emit(buf, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t available_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	size_t avail = 0;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma && p2pdma->pool)
		avail = gen_pool_avail(p2pdma->pool);
	rcu_read_unlock();

	return sysfs_emit(buf, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);

static ssize_t published_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	bool published = false;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma)
		published = p2pdma->p2pmem_published;
	rcu_read_unlock();

	return sysfs_emit(buf, "%d\n", published);
}
static DEVICE_ATTR_RO(published);

static struct attribute *p2pmem_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_available.attr,
	&dev_attr_published.attr,
	NULL,
};

static const struct attribute_group p2pmem_group = {
	.attrs = p2pmem_attrs,
	.name = "p2pmem",
};
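
/*
 * With this attribute group registered on the provider's struct device,
 * the memory is visible in sysfs under
 * /sys/bus/pci/devices/<domain:bus:dev.fn>/p2pmem/{size,available,published}.
 * (Path shown for illustration; the exact BDF depends on the system.)
 */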

static void pci_p2pdma_release(void *data)
{
	struct pci_dev *pdev = data;
	struct pci_p2pdma *p2pdma;

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	if (!p2pdma)
		return;

	/* Flush and disable pci_alloc_p2pmem() */
	pdev->p2pdma = NULL;
	synchronize_rcu();

	gen_pool_destroy(p2pdma->pool);
	sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
	xa_destroy(&p2pdma->map_types);
}

static int pci_p2pdma_setup(struct pci_dev *pdev)
{
	int error = -ENOMEM;
	struct pci_p2pdma *p2p;

	p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
	if (!p2p)
		return -ENOMEM;

	xa_init(&p2p->map_types);

	p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
	if (!p2p->pool)
		goto out;

	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
	if (error)
		goto out_pool_destroy;

	error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
	if (error)
		goto out_pool_destroy;

	rcu_assign_pointer(pdev->p2pdma, p2p);
	return 0;

out_pool_destroy:
	gen_pool_destroy(p2p->pool);
out:
	devm_kfree(&pdev->dev, p2p);
	return error;
}

/**
 * pci_p2pdma_add_resource - add memory for use as p2p memory
 * @pdev: the device to add the memory to
 * @bar: PCI BAR to add
 * @size: size of the memory to add, may be zero to use the whole BAR
 * @offset: offset into the PCI BAR
 *
 * The memory will be given ZONE_DEVICE struct pages so that it may
 * be used with any DMA request.
 */
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
			    u64 offset)
{
	struct pci_p2pdma_pagemap *p2p_pgmap;
	struct dev_pagemap *pgmap;
	struct pci_p2pdma *p2pdma;
	void *addr;
	int error;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -EINVAL;

	if (offset >= pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!size)
		size = pci_resource_len(pdev, bar) - offset;

	if (size + offset > pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!pdev->p2pdma) {
		error = pci_p2pdma_setup(pdev);
		if (error)
			return error;
	}

	p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
	if (!p2p_pgmap)
		return -ENOMEM;

	pgmap = &p2p_pgmap->pgmap;
	pgmap->range.start = pci_resource_start(pdev, bar) + offset;
	pgmap->range.end = pgmap->range.start + size - 1;
	pgmap->nr_range = 1;
	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;

	p2p_pgmap->provider = pdev;
	p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) -
		pci_resource_start(pdev, bar);

	addr = devm_memremap_pages(&pdev->dev, pgmap);
	if (IS_ERR(addr)) {
		error = PTR_ERR(addr);
		goto pgmap_free;
	}

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	error = gen_pool_add_owner(p2pdma->pool, (unsigned long)addr,
			pci_bus_address(pdev, bar) + offset,
			range_len(&pgmap->range), dev_to_node(&pdev->dev),
			pgmap->ref);
	if (error)
		goto pages_free;

	pci_info(pdev, "added peer-to-peer DMA memory %#llx-%#llx\n",
		 pgmap->range.start, pgmap->range.end);

	return 0;

pages_free:
	devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
	devm_kfree(&pdev->dev, pgmap);
	return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
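
/*
 * Example (illustrative sketch, not part of this file): a provider driver
 * would typically register part of one of its BARs as p2p memory from its
 * probe() callback and then publish it. The BAR number (4) and the use of
 * the whole BAR are hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int error;
 *
 *		error = pci_p2pdma_add_resource(pdev, 4, 0, 0);
 *		if (error)
 *			return error;
 *
 *		pci_p2pmem_publish(pdev, true);
 *		return 0;
 *	}
 */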

/*
 * Note this function returns the parent PCI device with a
 * reference taken. It is the caller's responsibility to drop
 * the reference.
 */
static struct pci_dev *find_parent_pci_dev(struct device *dev)
{
	struct device *parent;

	dev = get_device(dev);

	while (dev) {
		if (dev_is_pci(dev))
			return to_pci_dev(dev);

		parent = get_device(dev->parent);
		put_device(dev);
		dev = parent;
	}

	return NULL;
}

/*
 * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
 * TLPs upstream via ACS. Returns 1 if the packets will be redirected
 * upstream, 0 otherwise.
 */
static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

	pos = pdev->acs_cap;
	if (!pos)
		return 0;

	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);

	if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
		return 1;

	return 0;
}

static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
{
	if (!buf)
		return;

	seq_buf_printf(buf, "%s;", pci_name(pdev));
}

static bool cpu_supports_p2pdma(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* Any AMD CPU from the Zen microarchitecture (family 0x17) onward */
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17)
		return true;
#endif

	return false;
}

static const struct pci_p2pdma_whitelist_entry {
	unsigned short vendor;
	unsigned short device;
	enum {
		REQ_SAME_HOST_BRIDGE	= 1 << 0,
	} flags;
} pci_p2pdma_whitelist[] = {
	/* Intel Xeon E5/Core i7 */
	{PCI_VENDOR_ID_INTEL,	0x3c00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL,	0x3c01, REQ_SAME_HOST_BRIDGE},
	/* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */
	{PCI_VENDOR_ID_INTEL,	0x2f00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL,	0x2f01, REQ_SAME_HOST_BRIDGE},
	/* Intel SkyLake-E */
	{PCI_VENDOR_ID_INTEL,	0x2030, 0},
	{PCI_VENDOR_ID_INTEL,	0x2031, 0},
	{PCI_VENDOR_ID_INTEL,	0x2032, 0},
	{PCI_VENDOR_ID_INTEL,	0x2033, 0},
	{PCI_VENDOR_ID_INTEL,	0x2020, 0},
	{}
};

/*
 * If we can't find a common upstream bridge, take a look at the root
 * complex and compare it to a whitelist of known good hardware from
 * which we can DMA through the root complex peer-to-peer.
 *
 * There is no way to probe from config space whether a host bridge
 * routes P2P TLPs correctly, so only root complexes on the whitelist
 * are accepted.
 *
 * The host bridge is not itself a PCI device, so the device at 00.0 on
 * the root bus is used as a stand-in when matching against the
 * whitelist.
 */
static struct pci_dev *pci_host_bridge_dev(struct pci_host_bridge *host)
{
	struct pci_dev *root;

	root = list_first_entry_or_null(&host->bus->devices,
					struct pci_dev, bus_list);

	if (!root)
		return NULL;
	if (root->devfn != PCI_DEVFN(0, 0))
		return NULL;

	return root;
}

static bool __host_bridge_whitelist(struct pci_host_bridge *host,
				    bool same_host_bridge, bool warn)
{
	struct pci_dev *root = pci_host_bridge_dev(host);
	const struct pci_p2pdma_whitelist_entry *entry;
	unsigned short vendor, device;

	if (!root)
		return false;

	vendor = root->vendor;
	device = root->device;

	for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) {
		if (vendor != entry->vendor || device != entry->device)
			continue;
		if (entry->flags & REQ_SAME_HOST_BRIDGE && !same_host_bridge)
			return false;

		return true;
	}

	if (warn)
		pci_warn(root, "Host bridge not in P2PDMA whitelist: %04x:%04x\n",
			 vendor, device);

	return false;
}

/*
 * If the client and provider sit under the same host bridge, only that
 * bridge is checked (entries flagged REQ_SAME_HOST_BRIDGE are allowed).
 * Otherwise both host bridges must be in the whitelist.
 */
static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b,
				  bool warn)
{
	struct pci_host_bridge *host_a = pci_find_host_bridge(a->bus);
	struct pci_host_bridge *host_b = pci_find_host_bridge(b->bus);

	if (host_a == host_b)
		return __host_bridge_whitelist(host_a, true, warn);

	if (__host_bridge_whitelist(host_a, false, warn) &&
	    __host_bridge_whitelist(host_b, false, warn))
		return true;

	return false;
}

static unsigned long map_types_idx(struct pci_dev *client)
{
	return (pci_domain_nr(client->bus) << 16) |
		(client->bus->number << 8) | client->devfn;
}

/*
 * Calculate the P2PDMA mapping type and distance between two devices.
 *
 * If the two devices are the same PCI function, return
 * PCI_P2PDMA_MAP_BUS_ADDR and a distance of 0.
 *
 * If they are two functions of the same device, return
 * PCI_P2PDMA_MAP_BUS_ADDR and a distance of 2 (one hop up to the bridge,
 * then one hop back down to another function of the same device).
 *
 * In the case where two devices are connected to the same PCIe switch,
 * return a distance of 4. This corresponds to the following PCI tree:
 *
 *     -+  Root Port
 *      \+ Switch Upstream Port
 *       +-+ Switch Downstream Port 0
 *       + \- Device A
 *       \-+ Switch Downstream Port 1
 *         \- Device B
 *
 * The distance is 4 because we traverse from Device A through the switch
 * upstream port to the common bridge and then back down through the other
 * downstream port to Device B.
 *
 * If ACS redirect is set on any bridge in the path, P2P TLPs between the
 * two devices will be redirected through the host bridge, and the pair is
 * treated the same as devices that do not share an upstream bridge.
 *
 * Any two devices whose traffic goes through the host bridge will consult
 * the whitelist (or the CPU check in cpu_supports_p2pdma()). If the host
 * bridge(s) are acceptable, return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE with
 * the distance set to the total number of ports traversed; otherwise
 * return PCI_P2PDMA_MAP_NOT_SUPPORTED.
 */
static enum pci_p2pdma_map_type
calc_map_type_and_dist(struct pci_dev *provider, struct pci_dev *client,
		       int *dist, bool verbose)
{
	enum pci_p2pdma_map_type map_type = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
	struct pci_dev *a = provider, *b = client, *bb;
	bool acs_redirects = false;
	struct pci_p2pdma *p2pdma;
	struct seq_buf acs_list;
	int acs_cnt = 0;
	int dist_a = 0;
	int dist_b = 0;
	char buf[128];

	seq_buf_init(&acs_list, buf, sizeof(buf));

	/*
	 * Note, we don't need to take references to devices returned by
	 * pci_upstream_bridge() seeing that we hold a reference to a child
	 * device which will already hold a reference to the upstream bridge.
	 */
	while (a) {
		dist_b = 0;

		if (pci_bridge_has_acs_redir(a)) {
			seq_buf_print_bus_devfn(&acs_list, a);
			acs_cnt++;
		}

		bb = b;

		while (bb) {
			if (a == bb)
				goto check_b_path_acs;

			bb = pci_upstream_bridge(bb);
			dist_b++;
		}

		a = pci_upstream_bridge(a);
		dist_a++;
	}

	*dist = dist_a + dist_b;
	goto map_through_host_bridge;

check_b_path_acs:
	bb = b;

	while (bb) {
		if (a == bb)
			break;

		if (pci_bridge_has_acs_redir(bb)) {
			seq_buf_print_bus_devfn(&acs_list, bb);
			acs_cnt++;
		}

		bb = pci_upstream_bridge(bb);
	}

	*dist = dist_a + dist_b;

	if (!acs_cnt) {
		map_type = PCI_P2PDMA_MAP_BUS_ADDR;
		goto done;
	}

	if (verbose) {
		acs_list.buffer[acs_list.len-1] = 0; /* drop final semicolon */
		pci_warn(client, "ACS redirect is set between the client and provider (%s)\n",
			 pci_name(provider));
		pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
			 acs_list.buffer);
	}
	acs_redirects = true;

map_through_host_bridge:
	if (!cpu_supports_p2pdma() &&
	    !host_bridge_whitelist(provider, client, acs_redirects)) {
		if (verbose)
			pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n",
				 pci_name(provider));
		map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
	}
done:
	rcu_read_lock();
	p2pdma = rcu_dereference(provider->p2pdma);
	if (p2pdma)
		xa_store(&p2pdma->map_types, map_types_idx(client),
			 xa_mk_value(map_type), GFP_KERNEL);
	rcu_read_unlock();
	return map_type;
}

/**
 * pci_p2pdma_distance_many - Determine the cumulative distance between
 *	a p2pdma provider and the clients in use.
 * @provider: p2pdma provider to check against the client list
 * @clients: array of devices to check
 * @num_clients: number of clients in the array
 * @verbose: if true, print warnings for devices when we return -1
 *
 * Returns -1 if any of the clients are not compatible, otherwise returns a
 * positive number where a lower number is the preferable choice. (If there's
 * one client that's the same as the provider it will return 0, which is the
 * best choice.)
 *
 * "compatible" means the provider and the clients are either all behind
 * the same PCI root port or the host bridges connected to each of the
 * devices are listed in the 'pci_p2pdma_whitelist'.
 */
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
			     int num_clients, bool verbose)
{
	enum pci_p2pdma_map_type map;
	bool not_supported = false;
	struct pci_dev *pci_client;
	int total_dist = 0;
	int i, distance;

	if (num_clients == 0)
		return -1;

	for (i = 0; i < num_clients; i++) {
		pci_client = find_parent_pci_dev(clients[i]);
		if (!pci_client) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
			return -1;
		}

		map = calc_map_type_and_dist(provider, pci_client, &distance,
					     verbose);

		pci_dev_put(pci_client);

		if (map == PCI_P2PDMA_MAP_NOT_SUPPORTED)
			not_supported = true;

		if (not_supported && !verbose)
			break;

		total_dist += distance;
	}

	if (not_supported)
		return -1;

	return total_dist;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);

/**
 * pci_has_p2pmem - check if a given PCI device has published any p2pmem
 * @pdev: PCI device to check
 */
bool pci_has_p2pmem(struct pci_dev *pdev)
{
	struct pci_p2pdma *p2pdma;
	bool res;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	res = p2pdma && p2pdma->p2pmem_published;
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(pci_has_p2pmem);

/**
 * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with
 *	the specified list of clients and at the shortest distance
 * @clients: array of devices to check
 * @num_clients: number of client devices in the list
 *
 * If multiple devices are behind the same switch, the one "closest" to the
 * client devices in use will be chosen first. (So if one of the providers is
 * the same as one of the clients, that provider will be used ahead of any
 * other provider that is unrelated to the client.) If multiple providers are
 * an equal distance away, one will be chosen at random.
 *
 * Returns a pointer to the PCI device with a reference taken (use pci_dev_put
 * to return the reference) or NULL if no compatible device is found.
 */
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
{
	struct pci_dev *pdev = NULL;
	int distance;
	int closest_distance = INT_MAX;
	struct pci_dev **closest_pdevs;
	int dev_cnt = 0;
	const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
	int i;

	closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!closest_pdevs)
		return NULL;

	while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
		if (!pci_has_p2pmem(pdev))
			continue;

		distance = pci_p2pdma_distance_many(pdev, clients,
						    num_clients, false);
		if (distance < 0 || distance > closest_distance)
			continue;

		if (distance == closest_distance && dev_cnt >= max_devs)
			continue;

		if (distance < closest_distance) {
			for (i = 0; i < dev_cnt; i++)
				pci_dev_put(closest_pdevs[i]);

			dev_cnt = 0;
			closest_distance = distance;
		}

		closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
	}

	if (dev_cnt)
		pdev = pci_dev_get(closest_pdevs[prandom_u32_max(dev_cnt)]);

	for (i = 0; i < dev_cnt; i++)
		pci_dev_put(closest_pdevs[i]);

	kfree(closest_pdevs);
	return pdev;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
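
/*
 * Example (illustrative sketch, not part of this file): an orchestrating
 * driver with two client devices might pick a provider like this. The
 * 'nvme_dev' and 'rdma_dev' client pointers are hypothetical.
 *
 *	struct device *clients[2] = { &nvme_dev->dev, &rdma_dev->dev };
 *	struct pci_dev *provider;
 *
 *	provider = pci_p2pmem_find_many(clients, 2);
 *	if (!provider)
 *		return -ENODEV;		// no compatible p2pmem provider
 *	...
 *	pci_dev_put(provider);		// drop the reference taken above
 */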

/**
 * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
 * @pdev: the device to allocate memory from
 * @size: number of bytes to allocate
 *
 * Returns the allocated memory or NULL on error.
 */
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
	void *ret = NULL;
	struct percpu_ref *ref;
	struct pci_p2pdma *p2pdma;

	/*
	 * Pairs with synchronize_rcu() in pci_p2pdma_release() to
	 * ensure pdev->p2pdma is non-NULL for the duration of the
	 * read-lock.
	 */
	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (unlikely(!p2pdma))
		goto out;

	ret = (void *)gen_pool_alloc_owner(p2pdma->pool, size, (void **) &ref);
	if (!ret)
		goto out;

	if (unlikely(!percpu_ref_tryget_live(ref))) {
		gen_pool_free(p2pdma->pool, (unsigned long) ret, size);
		ret = NULL;
		goto out;
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);

/**
 * pci_free_p2pmem - free peer-to-peer DMA memory
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 * @size: number of bytes that were allocated
 */
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
	struct percpu_ref *ref;
	struct pci_p2pdma *p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);

	gen_pool_free_owner(p2pdma->pool, (uintptr_t)addr, size,
			    (void **) &ref);
	percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(pci_free_p2pmem);

/**
 * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
 *	address obtained with pci_alloc_p2pmem()
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 */
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
{
	struct pci_p2pdma *p2pdma;

	if (!addr)
		return 0;

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	if (!p2pdma)
		return 0;

	/*
	 * Note: when we added the memory to the pool we used the PCI
	 * bus address as the physical address. So gen_pool_virt_to_phys()
	 * actually returns the bus address despite the misleading name.
	 */
	return gen_pool_virt_to_phys(p2pdma->pool, (unsigned long)addr);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);
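
/*
 * Example (illustrative sketch, not part of this file): allocate a chunk of
 * p2p memory from a previously found provider, obtain its bus address for
 * programming a peer device, then free it. The 'provider' pointer and the
 * 4K size are hypothetical.
 *
 *	void *buf;
 *	pci_bus_addr_t bus_addr;
 *
 *	buf = pci_alloc_p2pmem(provider, SZ_4K);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	bus_addr = pci_p2pmem_virt_to_bus(provider, buf);
 *	// ... hand bus_addr to the peer device ...
 *	pci_free_p2pmem(provider, buf, SZ_4K);
 */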

/**
 * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
 * @pdev: the device to allocate memory from
 * @nents: the number of SG entries in the list
 * @length: number of bytes to allocate
 *
 * Return: %NULL on error or &struct scatterlist pointer and @nents on success
 */
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length)
{
	struct scatterlist *sg;
	void *addr;

	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, 1);

	addr = pci_alloc_p2pmem(pdev, length);
	if (!addr)
		goto out_free_sg;

	sg_set_buf(sg, addr, length);
	*nents = 1;
	return sg;

out_free_sg:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);

/**
 * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl()
 * @pdev: the device the memory was allocated from
 * @sgl: the allocated scatterlist
 */
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, INT_MAX, count) {
		if (!sg)
			break;

		pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
	}
	kfree(sgl);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);

/**
 * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
 *	other devices with pci_p2pmem_find_many()
 * @pdev: the device with peer-to-peer DMA memory to publish
 * @publish: set to true to publish the memory, false to unpublish it
 *
 * Published memory can be used by other PCI device drivers for
 * peer-to-peer DMA operations. Non-published memory is reserved for
 * exclusive use of the device driver that registers the peer-to-peer
 * memory.
 */
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
	struct pci_p2pdma *p2pdma;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma)
		p2pdma->p2pmem_published = publish;
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);

static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
						    struct device *dev)
{
	enum pci_p2pdma_map_type type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
	struct pci_dev *provider = to_p2p_pgmap(pgmap)->provider;
	struct pci_dev *client;
	struct pci_p2pdma *p2pdma;

	if (!provider->p2pdma)
		return PCI_P2PDMA_MAP_NOT_SUPPORTED;

	if (!dev_is_pci(dev))
		return PCI_P2PDMA_MAP_NOT_SUPPORTED;

	client = to_pci_dev(dev);

	rcu_read_lock();
	p2pdma = rcu_dereference(provider->p2pdma);

	if (p2pdma)
		type = xa_to_value(xa_load(&p2pdma->map_types,
					   map_types_idx(client)));
	rcu_read_unlock();
	return type;
}

static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
		struct device *dev, struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = sg_phys(s) - p2p_pgmap->bus_offset;
		sg_dma_len(s) = s->length;
	}

	return nents;
}

/**
 * pci_p2pdma_map_sg_attrs - map a PCI peer-to-peer scatterlist for DMA
 * @dev: device doing the DMA request
 * @sg: scatter list to map
 * @nents: elements in the scatterlist
 * @dir: DMA direction
 * @attrs: DMA attributes passed to dma_map_sg() (if called)
 *
 * Scatterlists mapped with this function should be unmapped using
 * pci_p2pdma_unmap_sg_attrs().
 *
 * Returns the number of SG entries mapped or 0 on error.
 */
int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_pagemap *p2p_pgmap =
		to_p2p_pgmap(sg_page(sg)->pgmap);

	switch (pci_p2pdma_map_type(sg_page(sg)->pgmap, dev)) {
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
		return dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	case PCI_P2PDMA_MAP_BUS_ADDR:
		return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);

/**
 * pci_p2pdma_unmap_sg_attrs - unmap a PCI peer-to-peer scatterlist that was
 *	mapped with pci_p2pdma_map_sg_attrs()
 * @dev: device doing the DMA request
 * @sg: scatter list to unmap
 * @nents: number of elements returned by pci_p2pdma_map_sg_attrs()
 * @dir: DMA direction
 * @attrs: DMA attributes passed to dma_unmap_sg() (if called)
 */
void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	enum pci_p2pdma_map_type map_type;

	map_type = pci_p2pdma_map_type(sg_page(sg)->pgmap, dev);

	if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
		dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs);
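
/*
 * Example (illustrative sketch, not part of this file): a client driver
 * mapping p2p memory allocated from a provider before programming a DMA
 * engine, then unmapping it afterwards. The 'provider' and 'dma_dev'
 * pointers and the 64K length are hypothetical.
 *
 *	struct scatterlist *sgl;
 *	unsigned int nents;
 *	int mapped;
 *
 *	sgl = pci_p2pmem_alloc_sgl(provider, &nents, SZ_64K);
 *	if (!sgl)
 *		return -ENOMEM;
 *
 *	mapped = pci_p2pdma_map_sg_attrs(dma_dev, sgl, nents,
 *					 DMA_BIDIRECTIONAL, 0);
 *	if (!mapped)
 *		goto out_free;
 *
 *	// ... issue the DMA using sg_dma_address(sgl) ...
 *
 *	pci_p2pdma_unmap_sg_attrs(dma_dev, sgl, nents, DMA_BIDIRECTIONAL, 0);
 * out_free:
 *	pci_p2pmem_free_sgl(provider, sgl);
 */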

/**
 * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
 *		to enable p2pdma
 * @page: contents of the value to be stored
 * @p2p_dev: returns the PCI device that was selected to be used
 *		(if one was specified in the stored value)
 * @use_p2pdma: returns whether to enable p2pdma or not
 *
 * Parses an attribute value to decide whether to enable p2pdma.
 * The value can select a PCI device (using its full BDF device
 * name) or a boolean (in any format strtobool() accepts). A false
 * value disables p2pdma, a true value expects the caller
 * to automatically find a compatible device, and specifying a PCI device
 * expects the caller to use that specific provider.
 *
 * pci_p2pdma_enable_show() should be used as the show operation for
 * the attribute.
 *
 * Returns 0 on success
 */
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
			    bool *use_p2pdma)
{
	struct device *dev;

	dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
	if (dev) {
		*use_p2pdma = true;
		*p2p_dev = to_pci_dev(dev);

		if (!pci_has_p2pmem(*p2p_dev)) {
			pci_err(*p2p_dev,
				"PCI device has no peer-to-peer memory: %s\n",
				page);
			pci_dev_put(*p2p_dev);
			return -ENODEV;
		}

		return 0;
	} else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
		/*
		 * If the user enters a PCI device that doesn't exist
		 * like "0000:01:00.1", we don't want strtobool to think
		 * it's a '0' when it's clearly not what the user wanted.
		 * So we require 0's and 1's to be exactly one character.
		 */
	} else if (!strtobool(page, use_p2pdma)) {
		return 0;
	}

	pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);

/**
 * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
 *		whether p2pdma is enabled
 * @page: contents of the stored value
 * @p2p_dev: the selected p2p device (NULL if no device is selected)
 * @use_p2pdma: whether p2pdma has been enabled
 *
 * Attributes that use pci_p2pdma_enable_store() should use this function
 * to show the value of the attribute.
 *
 * Returns the number of characters written to @page.
 */
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
			       bool use_p2pdma)
{
	if (!use_p2pdma)
		return sprintf(page, "0\n");

	if (!p2p_dev)
		return sprintf(page, "1\n");

	return sprintf(page, "%s\n", pci_name(p2p_dev));
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);
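
/*
 * Example (illustrative sketch, not part of this file): a subsystem exposing
 * a "p2pmem" configfs attribute could wire the two helpers above into its
 * store/show callbacks roughly like this. The foo_* names and the to_foo()
 * helper are hypothetical.
 *
 *	static ssize_t foo_p2pmem_store(struct config_item *item,
 *					const char *page, size_t count)
 *	{
 *		struct foo *foo = to_foo(item);
 *		int error;
 *
 *		error = pci_p2pdma_enable_store(page, &foo->p2p_dev,
 *						&foo->use_p2pdma);
 *		return error ? error : count;
 *	}
 *
 *	static ssize_t foo_p2pmem_show(struct config_item *item, char *page)
 *	{
 *		struct foo *foo = to_foo(item);
 *
 *		return pci_p2pdma_enable_show(page, foo->p2p_dev,
 *					      foo->use_p2pdma);
 *	}
 */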