// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <linux/sizes.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/debugfs.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>
#include <asm/mmzone.h>

#include <misc/cxl-base.h>

#include "powernv.h"
#include "pci.h"
#include "../../../../drivers/pci/pci.h"

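/*
 * An IODA1 PHB exposes 16 M64 BARs, each split into 8 segments, i.e. one
 * M64 segment per PE (128 in total); DMA32 space is carved into 256MB
 * segments.
 */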
#define PNV_IODA1_M64_NUM	16	/* Number of M64 BARs		*/
#define PNV_IODA1_M64_SEGS	8	/* Segments per M64 BAR		*/
#define PNV_IODA1_DMA32_SEGSIZE	0x10000000

static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_OCAPI" };
51
static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
static void pnv_pci_configure_bus(struct pci_bus *bus);
54
void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
			const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char pfix[32];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (pe->flags & PNV_IODA_PE_DEV)
		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		sprintf(pfix, "%04x:%02x ",
			pci_domain_nr(pe->pbus), pe->pbus->number);
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		sprintf(pfix, "%04x:%02x:%02x.%d",
			pci_domain_nr(pe->parent_dev->bus),
			(pe->rid & 0xff00) >> 8,
			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif

	printk("%spci %s: [PE# %.2x] %pV",
	       level, pfix, pe->pe_number, &vaf);

	va_end(args);
}
85
86static bool pnv_iommu_bypass_disabled __read_mostly;
87static bool pci_reset_phbs __read_mostly;
88
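/* Booting with "iommu=nobypass" disables the 64-bit DMA bypass window */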
89static int __init iommu_setup(char *str)
90{
91 if (!str)
92 return -EINVAL;
93
94 while (*str) {
95 if (!strncmp(str, "nobypass", 8)) {
96 pnv_iommu_bypass_disabled = true;
97 pr_info("PowerNV: IOMMU bypass window disabled.\n");
98 break;
99 }
100 str += strcspn(str, ",");
101 if (*str == ',')
102 str++;
103 }
104
105 return 0;
106}
107early_param("iommu", iommu_setup);
108
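/* Booting with "ppc_pci_reset_phbs" forces a reset of every PHB at boot */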
109static int __init pci_reset_phbs_setup(char *str)
110{
111 pci_reset_phbs = true;
112 return 0;
113}
114
115early_param("ppc_pci_reset_phbs", pci_reset_phbs_setup);
116
117static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
118{
119 s64 rc;
120
121 phb->ioda.pe_array[pe_no].phb = phb;
122 phb->ioda.pe_array[pe_no].pe_number = pe_no;
123 phb->ioda.pe_array[pe_no].dma_setup_done = false;

	/*
	 * Clear the PE frozen state as it might be put into frozen state
	 * in the last PCI remove path. It's not harmful to do so when the
	 * PE is already in unfrozen state.
	 */
130 rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
131 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
132 if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
133 pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n",
134 __func__, rc, phb->hose->global_number, pe_no);
135
136 return &phb->ioda.pe_array[pe_no];
137}
138
139static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
140{
141 if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) {
142 pr_warn("%s: Invalid PE %x on PHB#%x\n",
143 __func__, pe_no, phb->hose->global_number);
144 return;
145 }
146
147 mutex_lock(&phb->ioda.pe_alloc_mutex);
148 if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
149 pr_debug("%s: PE %x was reserved on PHB#%x\n",
150 __func__, pe_no, phb->hose->global_number);
151 mutex_unlock(&phb->ioda.pe_alloc_mutex);
152
153 pnv_ioda_init_pe(phb, pe_no);
154}
155
156struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb, int count)
157{
158 struct pnv_ioda_pe *ret = NULL;
159 int run = 0, pe, i;
160
161 mutex_lock(&phb->ioda.pe_alloc_mutex);

	/* Scan backwards from the top for a run of @count free PE numbers */
164 for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
165 if (test_bit(pe, phb->ioda.pe_alloc)) {
166 run = 0;
167 continue;
168 }
169
170 run++;
171 if (run == count)
172 break;
173 }
174 if (run != count)
175 goto out;
176
177 for (i = pe; i < pe + count; i++) {
178 set_bit(i, phb->ioda.pe_alloc);
179 pnv_ioda_init_pe(phb, i);
180 }
181 ret = &phb->ioda.pe_array[pe];
182
183out:
184 mutex_unlock(&phb->ioda.pe_alloc_mutex);
185 return ret;
186}
187
188void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
189{
190 struct pnv_phb *phb = pe->phb;
191 unsigned int pe_num = pe->pe_number;
192
193 WARN_ON(pe->pdev);
194 memset(pe, 0, sizeof(struct pnv_ioda_pe));
195
196 mutex_lock(&phb->ioda.pe_alloc_mutex);
197 clear_bit(pe_num, phb->ioda.pe_alloc);
198 mutex_unlock(&phb->ioda.pe_alloc_mutex);
199}

/* The default M64 BAR is shared by all PEs */
202static int pnv_ioda2_init_m64(struct pnv_phb *phb)
203{
204 const char *desc;
205 struct resource *r;
206 s64 rc;

	/* Configure the default M64 BAR */
209 rc = opal_pci_set_phb_mem_window(phb->opal_id,
210 OPAL_M64_WINDOW_TYPE,
211 phb->ioda.m64_bar_idx,
212 phb->ioda.m64_base,
213 0,
214 phb->ioda.m64_size);
215 if (rc != OPAL_SUCCESS) {
216 desc = "configuring";
217 goto fail;
218 }

	/* Enable the default M64 BAR */
221 rc = opal_pci_phb_mmio_enable(phb->opal_id,
222 OPAL_M64_WINDOW_TYPE,
223 phb->ioda.m64_bar_idx,
224 OPAL_ENABLE_M64_SPLIT);
225 if (rc != OPAL_SUCCESS) {
226 desc = "enabling";
227 goto fail;
228 }

	/*
	 * Exclude the segments for reserved and root bus PE, which
	 * are first or last two PEs.
	 */
234 r = &phb->hose->mem_resources[1];
235 if (phb->ioda.reserved_pe_idx == 0)
236 r->start += (2 * phb->ioda.m64_segsize);
237 else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
238 r->end -= (2 * phb->ioda.m64_segsize);
239 else
240 pr_warn(" Cannot strip M64 segment for reserved PE#%x\n",
241 phb->ioda.reserved_pe_idx);
242
243 return 0;
244
245fail:
246 pr_warn(" Failure %lld %s M64 BAR#%d\n",
247 rc, desc, phb->ioda.m64_bar_idx);
248 opal_pci_phb_mmio_enable(phb->opal_id,
249 OPAL_M64_WINDOW_TYPE,
250 phb->ioda.m64_bar_idx,
251 OPAL_DISABLE_M64);
252 return -EIO;
253}
254
255static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
256 unsigned long *pe_bitmap)
257{
258 struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
259 struct resource *r;
260 resource_size_t base, sgsz, start, end;
261 int segno, i;
262
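	/*
	 * Walk the device's BARs: any resource sitting inside the M64
	 * window pins the M64 segments (and hence PE numbers) it spans.
	 */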
263 base = phb->ioda.m64_base;
264 sgsz = phb->ioda.m64_segsize;
265 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
266 r = &pdev->resource[i];
267 if (!r->parent || !pnv_pci_is_m64(phb, r))
268 continue;
269
270 start = ALIGN_DOWN(r->start - base, sgsz);
271 end = ALIGN(r->end - base, sgsz);
272 for (segno = start / sgsz; segno < end / sgsz; segno++) {
273 if (pe_bitmap)
274 set_bit(segno, pe_bitmap);
275 else
276 pnv_ioda_reserve_pe(phb, segno);
277 }
278 }
279}
280
281static int pnv_ioda1_init_m64(struct pnv_phb *phb)
282{
283 struct resource *r;
284 int index;

	/*
	 * There are 16 M64 BARs, each of which has 8 segments. So
	 * there are as many M64 segments as the maximum number of
	 * PEs, which is 128.
	 */
291 for (index = 0; index < PNV_IODA1_M64_NUM; index++) {
292 unsigned long base, segsz = phb->ioda.m64_segsize;
293 int64_t rc;
294
295 base = phb->ioda.m64_base +
296 index * PNV_IODA1_M64_SEGS * segsz;
297 rc = opal_pci_set_phb_mem_window(phb->opal_id,
298 OPAL_M64_WINDOW_TYPE, index, base, 0,
299 PNV_IODA1_M64_SEGS * segsz);
300 if (rc != OPAL_SUCCESS) {
301 pr_warn(" Error %lld setting M64 PHB#%x-BAR#%d\n",
302 rc, phb->hose->global_number, index);
303 goto fail;
304 }
305
306 rc = opal_pci_phb_mmio_enable(phb->opal_id,
307 OPAL_M64_WINDOW_TYPE, index,
308 OPAL_ENABLE_M64_SPLIT);
309 if (rc != OPAL_SUCCESS) {
310 pr_warn(" Error %lld enabling M64 PHB#%x-BAR#%d\n",
311 rc, phb->hose->global_number, index);
312 goto fail;
313 }
314 }
315
316 for (index = 0; index < phb->ioda.total_pe_num; index++) {
317 int64_t rc;

		/*
		 * P7IOC supports M64DT, which helps mapping M64 segment
		 * to one particular PE#. However, PHB3 has fixed mapping
		 * between M64 segment and PE#. In order to have same logic
		 * for P7IOC and PHB3, we enforce fixed mapping between M64
		 * segment and PE# on P7IOC.
		 */
326 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
327 index, OPAL_M64_WINDOW_TYPE,
328 index / PNV_IODA1_M64_SEGS,
329 index % PNV_IODA1_M64_SEGS);
330 if (rc != OPAL_SUCCESS) {
331 pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n",
332 __func__, rc, phb->hose->global_number,
333 index);
334 goto fail;
335 }
336 }

	/*
	 * Exclude the segments for reserved and root bus PE, which
	 * are first or last two PEs.
	 */
342 r = &phb->hose->mem_resources[1];
343 if (phb->ioda.reserved_pe_idx == 0)
344 r->start += (2 * phb->ioda.m64_segsize);
345 else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
346 r->end -= (2 * phb->ioda.m64_segsize);
347 else
348 WARN(1, "Wrong reserved PE#%x on PHB#%x\n",
349 phb->ioda.reserved_pe_idx, phb->hose->global_number);
350
351 return 0;
352
353fail:
354 for ( ; index >= 0; index--)
355 opal_pci_phb_mmio_enable(phb->opal_id,
356 OPAL_M64_WINDOW_TYPE, index, OPAL_DISABLE_M64);
357
358 return -EIO;
359}
360
361static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus,
362 unsigned long *pe_bitmap,
363 bool all)
364{
365 struct pci_dev *pdev;
366
367 list_for_each_entry(pdev, &bus->devices, bus_list) {
368 pnv_ioda_reserve_dev_m64_pe(pdev, pe_bitmap);
369
370 if (all && pdev->subordinate)
371 pnv_ioda_reserve_m64_pe(pdev->subordinate,
372 pe_bitmap, all);
373 }
374}
375
376static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
377{
378 struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
379 struct pnv_ioda_pe *master_pe, *pe;
380 unsigned long size, *pe_alloc;
381 int i;

	/* Root bus shouldn't use M64 */
384 if (pci_is_root_bus(bus))
385 return NULL;

	/* Allocate bitmap */
388 size = ALIGN(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
389 pe_alloc = kzalloc(size, GFP_KERNEL);
	if (!pe_alloc) {
		pr_warn("%s: Out of memory !\n", __func__);
		return NULL;
	}

	/* Figure out reserved PE numbers by the PE */
397 pnv_ioda_reserve_m64_pe(bus, pe_alloc, all);

	/*
	 * The current bus might not own M64 window and that's all
	 * contributed by its child buses. For the case, we needn't
	 * pick M64 dependent PE#.
	 */
404 if (bitmap_empty(pe_alloc, phb->ioda.total_pe_num)) {
405 kfree(pe_alloc);
406 return NULL;
407 }

	/*
	 * Figure out the master PE and put all slave PEs to master
	 * PE's list to form compound PE.
	 */
413 master_pe = NULL;
414 i = -1;
415 while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe_num, i + 1)) <
416 phb->ioda.total_pe_num) {
417 pe = &phb->ioda.pe_array[i];
418
419 phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number;
420 if (!master_pe) {
421 pe->flags |= PNV_IODA_PE_MASTER;
422 INIT_LIST_HEAD(&pe->slaves);
423 master_pe = pe;
424 } else {
425 pe->flags |= PNV_IODA_PE_SLAVE;
426 pe->master = master_pe;
427 list_add_tail(&pe->list, &master_pe->slaves);
428 }
429 }
430
431 kfree(pe_alloc);
432 return master_pe;
433}
434
435static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
436{
437 struct pci_controller *hose = phb->hose;
438 struct device_node *dn = hose->dn;
439 struct resource *res;
440 u32 m64_range[2], i;
441 const __be32 *r;
442 u64 pci_addr;
443
444 if (phb->type != PNV_PHB_IODA1 && phb->type != PNV_PHB_IODA2) {
		pr_info(" M64 window is not supported\n");
446 return;
447 }
448
449 if (!firmware_has_feature(FW_FEATURE_OPAL)) {
450 pr_info(" Firmware too old to support M64 window\n");
451 return;
452 }
453
454 r = of_get_property(dn, "ibm,opal-m64-window", NULL);
455 if (!r) {
456 pr_info(" No <ibm,opal-m64-window> on %pOF\n",
457 dn);
458 return;
459 }

	/*
	 * Find the available M64 BAR range and pickup the last one for
	 * covering the whole 64-bits space. We support only one range.
	 */
465 if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges",
466 m64_range, 2)) {
		/* this property is missing in older skiboots */
468 m64_range[0] = 0;
469 m64_range[1] = 16;
470 }
471
472 if (m64_range[1] > 63) {
473 pr_warn("%s: Limiting M64 range to 63 (from %d) on PHB#%x\n",
474 __func__, m64_range[1], phb->hose->global_number);
475 m64_range[1] = 63;
476 }
477
478 if (m64_range[1] <= m64_range[0]) {
479 pr_warn("%s: M64 empty, disabling M64 usage on PHB#%x\n",
480 __func__, phb->hose->global_number);
481 return;
482 }

	/* Configure M64 information */
485 res = &hose->mem_resources[1];
486 res->name = dn->full_name;
487 res->start = of_translate_address(dn, r + 2);
488 res->end = res->start + of_read_number(r + 4, 2) - 1;
489 res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
490 pci_addr = of_read_number(r, 2);
491 hose->mem_offset[1] = res->start - pci_addr;
492
493 phb->ioda.m64_size = resource_size(res);
494 phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num;
495 phb->ioda.m64_base = pci_addr;

	/* This lines up nicely with the display from processing OF ranges */
498 pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n",
499 res->start, res->end, pci_addr, m64_range[0],
500 m64_range[0] + m64_range[1] - 1);

	/* Mark all M64 used up by default */
503 phb->ioda.m64_bar_alloc = (unsigned long)-1;

	/* Use last M64 BAR for IODA2 */
506 m64_range[1]--;
507 phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1];
508
509 pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx);

	/* Mark remaining ones free */
512 for (i = m64_range[0]; i < m64_range[1]; i++)
513 clear_bit(i, &phb->ioda.m64_bar_alloc);

	/*
	 * Setup init functions for M64 based on IODA version, IODA3 uses
	 * the IODA2 code.
	 */
519 if (phb->type == PNV_PHB_IODA1)
520 phb->init_m64 = pnv_ioda1_init_m64;
521 else
522 phb->init_m64 = pnv_ioda2_init_m64;
523}
524
525static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
526{
527 struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
528 struct pnv_ioda_pe *slave;
529 s64 rc;

	/* Fetch master PE */
532 if (pe->flags & PNV_IODA_PE_SLAVE) {
533 pe = pe->master;
534 if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
535 return;
536
537 pe_no = pe->pe_number;
538 }

	/* Freeze master PE */
541 rc = opal_pci_eeh_freeze_set(phb->opal_id,
542 pe_no,
543 OPAL_EEH_ACTION_SET_FREEZE_ALL);
544 if (rc != OPAL_SUCCESS) {
545 pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
546 __func__, rc, phb->hose->global_number, pe_no);
547 return;
548 }

	/* Freeze slave PEs */
551 if (!(pe->flags & PNV_IODA_PE_MASTER))
552 return;
553
554 list_for_each_entry(slave, &pe->slaves, list) {
555 rc = opal_pci_eeh_freeze_set(phb->opal_id,
556 slave->pe_number,
557 OPAL_EEH_ACTION_SET_FREEZE_ALL);
558 if (rc != OPAL_SUCCESS)
559 pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
560 __func__, rc, phb->hose->global_number,
561 slave->pe_number);
562 }
563}
564
565static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
566{
567 struct pnv_ioda_pe *pe, *slave;
568 s64 rc;

	/* Find master PE */
571 pe = &phb->ioda.pe_array[pe_no];
572 if (pe->flags & PNV_IODA_PE_SLAVE) {
573 pe = pe->master;
574 WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
575 pe_no = pe->pe_number;
576 }

	/* Clear frozen state for master PE */
579 rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
580 if (rc != OPAL_SUCCESS) {
581 pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
582 __func__, rc, opt, phb->hose->global_number, pe_no);
583 return -EIO;
584 }
585
586 if (!(pe->flags & PNV_IODA_PE_MASTER))
587 return 0;

	/* Clear frozen state for slave PEs */
590 list_for_each_entry(slave, &pe->slaves, list) {
591 rc = opal_pci_eeh_freeze_clear(phb->opal_id,
592 slave->pe_number,
593 opt);
594 if (rc != OPAL_SUCCESS) {
595 pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
596 __func__, rc, opt, phb->hose->global_number,
597 slave->pe_number);
598 return -EIO;
599 }
600 }
601
602 return 0;
603}
604
605static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
606{
607 struct pnv_ioda_pe *slave, *pe;
608 u8 fstate = 0, state;
609 __be16 pcierr = 0;
610 s64 rc;

	/* Sanity check on PE number */
613 if (pe_no < 0 || pe_no >= phb->ioda.total_pe_num)
614 return OPAL_EEH_STOPPED_PERM_UNAVAIL;

	/*
	 * Fetch the master PE and the PE instance might be
	 * not initialized yet.
	 */
620 pe = &phb->ioda.pe_array[pe_no];
621 if (pe->flags & PNV_IODA_PE_SLAVE) {
622 pe = pe->master;
623 WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
624 pe_no = pe->pe_number;
625 }

	/* Check the master PE */
628 rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
629 &state, &pcierr, NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
			__func__, rc, phb->hose->global_number, pe_no);
		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
	}

	/* Check the slave PE */
639 if (!(pe->flags & PNV_IODA_PE_MASTER))
640 return state;
641
642 list_for_each_entry(slave, &pe->slaves, list) {
643 rc = opal_pci_eeh_freeze_status(phb->opal_id,
644 slave->pe_number,
645 &fstate,
646 &pcierr,
647 NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number,
				slave->pe_number);
			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
		}

		/*
		 * Override the result based on the ascending
		 * priority of PE state.
		 */
660 if (fstate > state)
661 state = fstate;
662 }
663
664 return state;
665}
666
667struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn)
668{
669 int pe_number = phb->ioda.pe_rmap[bdfn];
670
671 if (pe_number == IODA_INVALID_PE)
672 return NULL;
673
674 return &phb->ioda.pe_array[pe_number];
675}
676
677struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
678{
679 struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
680 struct pci_dn *pdn = pci_get_pdn(dev);
681
682 if (!pdn)
683 return NULL;
684 if (pdn->pe_number == IODA_INVALID_PE)
685 return NULL;
686 return &phb->ioda.pe_array[pdn->pe_number];
687}
688
689static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
690 struct pnv_ioda_pe *parent,
691 struct pnv_ioda_pe *child,
692 bool is_add)
693{
694 const char *desc = is_add ? "adding" : "removing";
695 uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
696 OPAL_REMOVE_PE_FROM_DOMAIN;
697 struct pnv_ioda_pe *slave;
698 long rc;

	/* Parent PE affects child PE */
701 rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
702 child->pe_number, op);
703 if (rc != OPAL_SUCCESS) {
704 pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
705 rc, desc);
706 return -ENXIO;
707 }
708
709 if (!(child->flags & PNV_IODA_PE_MASTER))
710 return 0;

	/* Compound case: parent PE affects slave PEs */
713 list_for_each_entry(slave, &child->slaves, list) {
714 rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
715 slave->pe_number, op);
716 if (rc != OPAL_SUCCESS) {
717 pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
718 rc, desc);
719 return -ENXIO;
720 }
721 }
722
723 return 0;
724}
725
726static int pnv_ioda_set_peltv(struct pnv_phb *phb,
727 struct pnv_ioda_pe *pe,
728 bool is_add)
729{
730 struct pnv_ioda_pe *slave;
731 struct pci_dev *pdev = NULL;
732 int ret;

	/*
	 * Clear PE frozen state. If it's master PE, we need to
	 * clear slave PE frozen state as well.
	 */
738 if (is_add) {
739 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
740 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
741 if (pe->flags & PNV_IODA_PE_MASTER) {
742 list_for_each_entry(slave, &pe->slaves, list)
743 opal_pci_eeh_freeze_clear(phb->opal_id,
744 slave->pe_number,
745 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
746 }
747 }

	/*
	 * Associate PE in PELT. We need to add the PE into the
	 * corresponding PELT-V as well. Otherwise, an error
	 * originated from the PE might contribute to other
	 * PEs.
	 */
755 ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
756 if (ret)
757 return ret;

	/* For compound PEs, any one affects all of them */
760 if (pe->flags & PNV_IODA_PE_MASTER) {
761 list_for_each_entry(slave, &pe->slaves, list) {
762 ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
763 if (ret)
764 return ret;
765 }
766 }
767
768 if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
769 pdev = pe->pbus->self;
770 else if (pe->flags & PNV_IODA_PE_DEV)
771 pdev = pe->pdev->bus->self;
772#ifdef CONFIG_PCI_IOV
773 else if (pe->flags & PNV_IODA_PE_VF)
774 pdev = pe->parent_dev;
775#endif
776 while (pdev) {
777 struct pci_dn *pdn = pci_get_pdn(pdev);
778 struct pnv_ioda_pe *parent;
779
780 if (pdn && pdn->pe_number != IODA_INVALID_PE) {
781 parent = &phb->ioda.pe_array[pdn->pe_number];
782 ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
783 if (ret)
784 return ret;
785 }
786
787 pdev = pdev->bus->self;
788 }
789
790 return 0;
791}
792
793static void pnv_ioda_unset_peltv(struct pnv_phb *phb,
794 struct pnv_ioda_pe *pe,
795 struct pci_dev *parent)
796{
797 int64_t rc;
798
799 while (parent) {
800 struct pci_dn *pdn = pci_get_pdn(parent);
801
802 if (pdn && pdn->pe_number != IODA_INVALID_PE) {
803 rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
804 pe->pe_number,
805 OPAL_REMOVE_PE_FROM_DOMAIN);
			/* XXX What to do in case of error ? */
807 }
808 parent = parent->bus->self;
809 }
810
811 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
812 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Disassociate PE in PELT */
815 rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
816 pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
817 if (rc)
818 pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc);
819}
820
821int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
822{
823 struct pci_dev *parent;
824 uint8_t bcomp, dcomp, fcomp;
825 int64_t rc;
826 long rid_end, rid;

	/* Currently we only deconfigure VF PEs; bus PEs will always be there */
829 if (pe->pbus) {
830 int count;
831
832 dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
833 fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
834 parent = pe->pbus->self;
835 if (pe->flags & PNV_IODA_PE_BUS_ALL)
836 count = resource_size(&pe->pbus->busn_res);
837 else
838 count = 1;
839
		switch (count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
853 rid_end = pe->rid + (count << 8);
854 } else {
855#ifdef CONFIG_PCI_IOV
856 if (pe->flags & PNV_IODA_PE_VF)
857 parent = pe->parent_dev;
858 else
859#endif
860 parent = pe->pdev->bus->self;
861 bcomp = OpalPciBusAll;
862 dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
863 fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
864 rid_end = pe->rid + 1;
865 }

	/* Clear the reverse map */
868 for (rid = pe->rid; rid < rid_end; rid++)
869 phb->ioda.pe_rmap[rid] = IODA_INVALID_PE;

	/*
	 * Release from all parents' PELT-V. NPUs don't have a PELTV
	 * table.
	 */
875 if (phb->type != PNV_PHB_NPU_OCAPI)
876 pnv_ioda_unset_peltv(phb, pe, parent);
877
878 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
879 bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
880 if (rc)
881 pe_err(pe, "OPAL error %lld trying to setup PELT table\n", rc);
882
883 pe->pbus = NULL;
884 pe->pdev = NULL;
885#ifdef CONFIG_PCI_IOV
886 pe->parent_dev = NULL;
887#endif
888
889 return 0;
890}
891
892int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
893{
894 uint8_t bcomp, dcomp, fcomp;
895 long rc, rid_end, rid;

	/* Bus validation ? */
898 if (pe->pbus) {
899 int count;
900
901 dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
902 fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
903 if (pe->flags & PNV_IODA_PE_BUS_ALL)
904 count = resource_size(&pe->pbus->busn_res);
905 else
906 count = 1;
907
		switch (count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
921 rid_end = pe->rid + (count << 8);
922 } else {
923 bcomp = OpalPciBusAll;
924 dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
925 fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
926 rid_end = pe->rid + 1;
927 }

	/*
	 * Associate PE in PELT. We need to add the PE into the
	 * corresponding PELT-V as well. Otherwise, an error
	 * originated from the PE might contribute to other
	 * PEs.
	 */
935 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
936 bcomp, dcomp, fcomp, OPAL_MAP_PE);
937 if (rc) {
938 pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
939 return -ENXIO;
940 }

	/*
	 * Configure PELTV. NPUs don't have a PELTV table so skip
	 * configuration on them.
	 */
946 if (phb->type != PNV_PHB_NPU_OCAPI)
947 pnv_ioda_set_peltv(phb, pe, true);

	/* Setup reverse map */
950 for (rid = pe->rid; rid < rid_end; rid++)
951 phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVT on IODA1 */
954 if (phb->type != PNV_PHB_IODA1) {
955 pe->mve_number = 0;
956 goto out;
957 }
958
959 pe->mve_number = pe->pe_number;
960 rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
961 if (rc != OPAL_SUCCESS) {
962 pe_err(pe, "OPAL error %ld setting up MVE %x\n",
963 rc, pe->mve_number);
964 pe->mve_number = -1;
965 } else {
966 rc = opal_pci_set_mve_enable(phb->opal_id,
967 pe->mve_number, OPAL_ENABLE_MVE);
968 if (rc) {
969 pe_err(pe, "OPAL error %ld enabling MVE %x\n",
970 rc, pe->mve_number);
971 pe->mve_number = -1;
972 }
973 }
974
975out:
976 return 0;
977}
978
979static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
980{
981 struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
982 struct pci_dn *pdn = pci_get_pdn(dev);
983 struct pnv_ioda_pe *pe;
984
985 if (!pdn) {
986 pr_err("%s: Device tree node not associated properly\n",
987 pci_name(dev));
988 return NULL;
989 }
990 if (pdn->pe_number != IODA_INVALID_PE)
991 return NULL;
992
993 pe = pnv_ioda_alloc_pe(phb, 1);
994 if (!pe) {
995 pr_warn("%s: Not enough PE# available, disabling device\n",
996 pci_name(dev));
997 return NULL;
998 }

	/*
	 * NOTE: We don't get a reference for the pointer in the PE
	 * data structure; both the device and PE structures should be
	 * destroyed at the same time.
	 *
	 * At some point we want to remove the PDN completely anyway.
	 */
1006 pdn->pe_number = pe->pe_number;
1007 pe->flags = PNV_IODA_PE_DEV;
1008 pe->pdev = dev;
1009 pe->pbus = NULL;
1010 pe->mve_number = -1;
1011 pe->rid = dev->bus->number << 8 | pdn->devfn;
1012 pe->device_count++;
1013
1014 pe_info(pe, "Associated device to PE\n");
1015
1016 if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
1018 pnv_ioda_free_pe(pe);
1019 pdn->pe_number = IODA_INVALID_PE;
1020 pe->pdev = NULL;
1021 return NULL;
1022 }
1023
	/* Put PE to the list */
1025 mutex_lock(&phb->ioda.pe_list_mutex);
1026 list_add_tail(&pe->list, &phb->ioda.pe_list);
1027 mutex_unlock(&phb->ioda.pe_list_mutex);
1028 return pe;
1029}

/*
 * There are two types of PCI-bus-sensitive PEs: one comprising a single
 * PCI bus, and another containing the primary PCI bus plus its
 * subordinate PCI devices and buses. The second type is normally
 * originated by a PCIe-to-PCI bridge or a PLX switch downstream port.
 */
1037static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
1038{
1039 struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
1040 struct pnv_ioda_pe *pe = NULL;
1041 unsigned int pe_num;

	/*
	 * In partial hotplug case, the PE instance might be still alive.
	 * We should reuse it instead of allocating a new one.
	 */
1047 pe_num = phb->ioda.pe_rmap[bus->number << 8];
1048 if (WARN_ON(pe_num != IODA_INVALID_PE)) {
1049 pe = &phb->ioda.pe_array[pe_num];
1050 return NULL;
1051 }

	/* PE number for root bus should have been reserved */
1054 if (pci_is_root_bus(bus))
1055 pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];

	/* Check if PE is determined by M64 */
1058 if (!pe)
1059 pe = pnv_ioda_pick_m64_pe(bus, all);

	/* The PE number isn't pinned by M64 */
1062 if (!pe)
1063 pe = pnv_ioda_alloc_pe(phb, 1);
1064
1065 if (!pe) {
1066 pr_warn("%s: Not enough PE# available for PCI bus %04x:%02x\n",
1067 __func__, pci_domain_nr(bus), bus->number);
1068 return NULL;
1069 }
1070
1071 pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
1072 pe->pbus = bus;
1073 pe->pdev = NULL;
1074 pe->mve_number = -1;
1075 pe->rid = bus->busn_res.start << 8;
1076
1077 if (all)
1078 pe_info(pe, "Secondary bus %pad..%pad associated with PE#%x\n",
1079 &bus->busn_res.start, &bus->busn_res.end,
1080 pe->pe_number);
1081 else
1082 pe_info(pe, "Secondary bus %pad associated with PE#%x\n",
1083 &bus->busn_res.start, pe->pe_number);
1084
1085 if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
1087 pnv_ioda_free_pe(pe);
1088 pe->pbus = NULL;
1089 return NULL;
1090 }

	/* Put PE to the list */
1093 list_add_tail(&pe->list, &phb->ioda.pe_list);
1094
1095 return pe;
1096}
1097
1098static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
1099 struct pnv_ioda_pe *pe);
1100
1101static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev)
1102{
1103 struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
1104 struct pci_dn *pdn = pci_get_pdn(pdev);
1105 struct pnv_ioda_pe *pe;

	/* Check if the BDFN for this device is associated with a PE yet */
1108 pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8));
1109 if (!pe) {
		/* VF PEs should be pre-configured in pnv_pci_sriov_enable() */
1111 if (WARN_ON(pdev->is_virtfn))
1112 return;
1113
1114 pnv_pci_configure_bus(pdev->bus);
1115 pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8));
1116 pci_info(pdev, "Configured PE#%x\n", pe ? pe->pe_number : 0xfffff);

		/*
		 * If we can't setup the IODA PE something has gone horribly
		 * wrong and we can't enable DMA for the device.
		 */
1123 if (WARN_ON(!pe))
1124 return;
1125 } else {
1126 pci_info(pdev, "Added to existing PE#%x\n", pe->pe_number);
1127 }

	/*
	 * We assume that bridges *probably* don't need to do any DMA so we can
	 * skip allocating a TCE table, etc unless we get a non-bridge device.
	 */
1133 if (!pe->dma_setup_done && !pci_is_bridge(pdev)) {
1134 switch (phb->type) {
1135 case PNV_PHB_IODA1:
1136 pnv_pci_ioda1_setup_dma_pe(phb, pe);
1137 break;
1138 case PNV_PHB_IODA2:
1139 pnv_pci_ioda2_setup_dma_pe(phb, pe);
1140 break;
1141 default:
1142 pr_warn("%s: No DMA for PHB#%x (type %d)\n",
1143 __func__, phb->hose->global_number, phb->type);
1144 }
1145 }
1146
1147 if (pdn)
1148 pdn->pe_number = pe->pe_number;
1149 pe->device_count++;
1150
1151 WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
1152 pdev->dev.archdata.dma_offset = pe->tce_bypass_base;
1153 set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);

	/* PEs with a DMA weight of zero won't have a group */
1156 if (pe->table_group.group)
1157 iommu_add_device(&pe->table_group, &pdev->dev);
1158}

/*
 * Reconfigure TVE#0 to be usable as 64-bit DMA space.
 *
 * The first 4GB of virtual memory for a PE is reserved for 32-bit
 * devices. This lets TVE#0 be used also for normal 32-bit DMA.
 * Devices can only access more than that if bit 59 of the PCI address
 * is set by hardware, which indicates TVE#1 should be used instead of
 * TVE#0. Many devices are not capable of addressing that many bits,
 * and as a result are limited to the 4GB of virtual memory made
 * available to 32-bit devices without the bit 59 set.
 *
 * To work around this, reconfigure TVE#0 to be suitable for 64-bit
 * DMA: build a TCE table that maps all of system memory 1:1, offset
 * by 4GB (so the 32-bit region keeps working), using huge 256MB TCE
 * pages to keep the table small, and point window 0 at it.
 */
1177static int pnv_pci_ioda_dma_64bit_bypass(struct pnv_ioda_pe *pe)
1178{
1179 u64 window_size, table_size, tce_count, addr;
1180 struct page *table_pages;
1181 u64 tce_order = 28;
1182 __be64 *tces;
1183 s64 rc;

	/*
	 * Window size needs to be a power of two, but needs to account for
	 * shifting memory by the 4GB offset required to skip 32bit space.
	 */
1189 window_size = roundup_pow_of_two(memory_hotplug_max() + (1ULL << 32));
1190 tce_count = window_size >> tce_order;
1191 table_size = tce_count << 3;
1192
1193 if (table_size < PAGE_SIZE)
1194 table_size = PAGE_SIZE;
1195
1196 table_pages = alloc_pages_node(pe->phb->hose->node, GFP_KERNEL,
1197 get_order(table_size));
1198 if (!table_pages)
1199 goto err;
1200
1201 tces = page_address(table_pages);
1202 if (!tces)
1203 goto err;
1204
1205 memset(tces, 0, table_size);
1206
1207 for (addr = 0; addr < memory_hotplug_max(); addr += (1 << tce_order)) {
1208 tces[(addr + (1ULL << 32)) >> tce_order] =
1209 cpu_to_be64(addr | TCE_PCI_READ | TCE_PCI_WRITE);
1210 }
1211
1212 rc = opal_pci_map_pe_dma_window(pe->phb->opal_id,
1213 pe->pe_number,
					/* reconfigure window 0 */
1215 (pe->pe_number << 1) + 0,
1216 1,
1217 __pa(tces),
1218 table_size,
1219 1 << tce_order);
1220 if (rc == OPAL_SUCCESS) {
1221 pe_info(pe, "Using 64-bit DMA iommu bypass (through TVE#0)\n");
1222 return 0;
1223 }
1224err:
1225 pe_err(pe, "Error configuring 64-bit DMA bypass\n");
1226 return -EIO;
1227}
1228
1229static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev,
1230 u64 dma_mask)
1231{
1232 struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
1233 struct pci_dn *pdn = pci_get_pdn(pdev);
1234 struct pnv_ioda_pe *pe;
1235
1236 if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
1237 return false;
1238
1239 pe = &phb->ioda.pe_array[pdn->pe_number];
1240 if (pe->tce_bypass_enabled) {
1241 u64 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
1242 if (dma_mask >= top)
1243 return true;
1244 }

	/*
	 * If the device can't set the TCE bypass bit but still wants
	 * to access 4GB or more, on PHB3 we can reconfigure TVE#0 to
	 * bypass the 32-bit region and be usable for 64-bit DMAs.
	 * The device needs to be able to address all of this space.
	 */
1252 if (dma_mask >> 32 &&
1253 dma_mask > (memory_hotplug_max() + (1ULL << 32)) &&
	    /* pe->pdev should be set if it's a single device, pe->pbus if not */
1255 (pe->device_count == 1 || !pe->pbus) &&
1256 phb->model == PNV_PHB_MODEL_PHB3) {
		/* Configure the bypass mode */
1258 s64 rc = pnv_pci_ioda_dma_64bit_bypass(pe);
1259 if (rc)
1260 return false;
		/* 4GB offset bypasses 32-bit space */
1262 pdev->dev.archdata.dma_offset = (1ULL << 32);
1263 return true;
1264 }
1265
1266 return false;
1267}
1268
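/* The TCE invalidation ("kill") register lives at offset 0x210 of the PHB regs */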
1269static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb,
1270 bool real_mode)
1271{
1272 return real_mode ? (__be64 __iomem *)(phb->regs_phys + 0x210) :
1273 (phb->regs + 0x210);
1274}
1275
1276static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
1277 unsigned long index, unsigned long npages, bool rm)
1278{
1279 struct iommu_table_group_link *tgl = list_first_entry_or_null(
1280 &tbl->it_group_list, struct iommu_table_group_link,
1281 next);
1282 struct pnv_ioda_pe *pe = container_of(tgl->table_group,
1283 struct pnv_ioda_pe, table_group);
1284 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
1285 unsigned long start, end, inc;
1286
1287 start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
1288 end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
1289 npages - 1);

	/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
1292 start |= (1ull << 63);
1293 end |= (1ull << 63);
1294 inc = 16;
1295 end |= inc - 1;
1296
1297 mb();
1298 while (start <= end) {
1299 if (rm)
1300 __raw_rm_writeq_be(start, invalidate);
1301 else
1302 __raw_writeq_be(start, invalidate);
1303
1304 start += inc;
1305 }

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
1311}
1312
1313static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
1314 long npages, unsigned long uaddr,
1315 enum dma_data_direction direction,
1316 unsigned long attrs)
1317{
1318 int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
1319 attrs);
1320
1321 if (!ret)
1322 pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
1323
1324 return ret;
1325}
1326
1327#ifdef CONFIG_IOMMU_API
/* Common for IODA1 and IODA2 */
1329static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index,
1330 unsigned long *hpa, enum dma_data_direction *direction,
1331 bool realmode)
1332{
1333 return pnv_tce_xchg(tbl, index, hpa, direction, !realmode);
1334}
1335#endif
1336
1337static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
1338 long npages)
1339{
1340 pnv_tce_free(tbl, index, npages);
1341
1342 pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
1343}
1344
1345static struct iommu_table_ops pnv_ioda1_iommu_ops = {
1346 .set = pnv_ioda1_tce_build,
1347#ifdef CONFIG_IOMMU_API
1348 .xchg_no_kill = pnv_ioda_tce_xchg_no_kill,
1349 .tce_kill = pnv_pci_p7ioc_tce_invalidate,
1350 .useraddrptr = pnv_tce_useraddrptr,
1351#endif
1352 .clear = pnv_ioda1_tce_free,
1353 .get = pnv_tce_get,
1354};
1355
1356#define PHB3_TCE_KILL_INVAL_ALL PPC_BIT(0)
1357#define PHB3_TCE_KILL_INVAL_PE PPC_BIT(1)
1358#define PHB3_TCE_KILL_INVAL_ONE PPC_BIT(2)
1359
1360static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
1361{
	/* 01xb - invalidate TCEs that match the specified PE# */
1363 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
1364 unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);
1365
1366 mb();
1367 __raw_writeq_be(val, invalidate);
1368}
1369
1370static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
1371 unsigned shift, unsigned long index,
1372 unsigned long npages)
1373{
1374 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
1375 unsigned long start, end, inc;

	/* We'll invalidate DMA address in PE scope */
1378 start = PHB3_TCE_KILL_INVAL_ONE;
1379 start |= (pe->pe_number & 0xFF);
1380 end = start;

	/* Figure out the start, end and step */
1383 start |= (index << shift);
1384 end |= ((index + npages - 1) << shift);
1385 inc = (0x1ull << shift);
1386 mb();
1387
1388 while (start <= end) {
1389 if (rm)
1390 __raw_rm_writeq_be(start, invalidate);
1391 else
1392 __raw_writeq_be(start, invalidate);
1393 start += inc;
1394 }
1395}
1396
1397static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
1398{
1399 struct pnv_phb *phb = pe->phb;
1400
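	/* Use the MMIO "TCE kill" fast path on PHB3 when the registers are mapped */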
1401 if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
1402 pnv_pci_phb3_tce_invalidate_pe(pe);
1403 else
1404 opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE,
1405 pe->pe_number, 0, 0, 0);
1406}
1407
1408static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
1409 unsigned long index, unsigned long npages, bool rm)
1410{
1411 struct iommu_table_group_link *tgl;
1412
1413 list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) {
1414 struct pnv_ioda_pe *pe = container_of(tgl->table_group,
1415 struct pnv_ioda_pe, table_group);
1416 struct pnv_phb *phb = pe->phb;
1417 unsigned int shift = tbl->it_page_shift;
1418
1419 if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
1420 pnv_pci_phb3_tce_invalidate(pe, rm, shift,
1421 index, npages);
1422 else
1423 opal_pci_tce_kill(phb->opal_id,
1424 OPAL_PCI_TCE_KILL_PAGES,
1425 pe->pe_number, 1u << shift,
1426 index << shift, npages);
1427 }
1428}
1429
1430static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
1431 long npages, unsigned long uaddr,
1432 enum dma_data_direction direction,
1433 unsigned long attrs)
1434{
1435 int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
1436 attrs);
1437
1438 if (!ret)
1439 pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
1440
1441 return ret;
1442}
1443
1444static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
1445 long npages)
1446{
1447 pnv_tce_free(tbl, index, npages);
1448
1449 pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
1450}
1451
1452static struct iommu_table_ops pnv_ioda2_iommu_ops = {
1453 .set = pnv_ioda2_tce_build,
1454#ifdef CONFIG_IOMMU_API
1455 .xchg_no_kill = pnv_ioda_tce_xchg_no_kill,
1456 .tce_kill = pnv_pci_ioda2_tce_invalidate,
1457 .useraddrptr = pnv_tce_useraddrptr,
1458#endif
1459 .clear = pnv_ioda2_tce_free,
1460 .get = pnv_tce_get,
1461 .free = pnv_pci_ioda2_table_free_pages,
1462};
1463
1464static int pnv_pci_ioda_dev_dma_weight(struct pci_dev *dev, void *data)
1465{
1466 unsigned int *weight = (unsigned int *)data;

	/* This is quite simplistic. The "base" weight of a device
	 * is 10. 0 means no DMA is to be accounted for it.
	 */
1471 if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
1472 return 0;
1473
1474 if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
1475 dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
1476 dev->class == PCI_CLASS_SERIAL_USB_EHCI)
1477 *weight += 3;
1478 else if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
1479 *weight += 15;
1480 else
1481 *weight += 10;
1482
1483 return 0;
1484}
1485
1486static unsigned int pnv_pci_ioda_pe_dma_weight(struct pnv_ioda_pe *pe)
1487{
1488 unsigned int weight = 0;

	/* SRIOV VF has same DMA32 weight as its PF */
1491#ifdef CONFIG_PCI_IOV
1492 if ((pe->flags & PNV_IODA_PE_VF) && pe->parent_dev) {
1493 pnv_pci_ioda_dev_dma_weight(pe->parent_dev, &weight);
1494 return weight;
1495 }
1496#endif
1497
1498 if ((pe->flags & PNV_IODA_PE_DEV) && pe->pdev) {
1499 pnv_pci_ioda_dev_dma_weight(pe->pdev, &weight);
1500 } else if ((pe->flags & PNV_IODA_PE_BUS) && pe->pbus) {
1501 struct pci_dev *pdev;
1502
1503 list_for_each_entry(pdev, &pe->pbus->devices, bus_list)
1504 pnv_pci_ioda_dev_dma_weight(pdev, &weight);
1505 } else if ((pe->flags & PNV_IODA_PE_BUS_ALL) && pe->pbus) {
1506 pci_walk_bus(pe->pbus, pnv_pci_ioda_dev_dma_weight, &weight);
1507 }
1508
1509 return weight;
1510}
1511
1512static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
1513 struct pnv_ioda_pe *pe)
1514{
1515
1516 struct page *tce_mem = NULL;
1517 struct iommu_table *tbl;
1518 unsigned int weight, total_weight = 0;
1519 unsigned int tce32_segsz, base, segs, avail, i;
1520 int64_t rc;
1521 void *addr;

	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */
1526 weight = pnv_pci_ioda_pe_dma_weight(pe);
1527 if (!weight)
1528 return;
1529
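	/* Give the PE a share of DMA32 segments proportional to its DMA weight */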
1530 pci_walk_bus(phb->hose->bus, pnv_pci_ioda_dev_dma_weight,
1531 &total_weight);
1532 segs = (weight * phb->ioda.dma32_count) / total_weight;
1533 if (!segs)
1534 segs = 1;

	/*
	 * Allocate contiguous DMA32 segments. We begin with the expected
	 * number of segments. With one more attempt, the number of DMA32
	 * segments to be allocated is decreased by one until one segment
	 * is allocated successfully.
	 */
1542 do {
1543 for (base = 0; base <= phb->ioda.dma32_count - segs; base++) {
1544 for (avail = 0, i = base; i < base + segs; i++) {
1545 if (phb->ioda.dma32_segmap[i] ==
1546 IODA_INVALID_PE)
1547 avail++;
1548 }
1549
1550 if (avail == segs)
1551 goto found;
1552 }
1553 } while (--segs);
1554
1555 if (!segs) {
1556 pe_warn(pe, "No available DMA32 segments\n");
1557 return;
1558 }
1559
1560found:
1561 tbl = pnv_pci_table_alloc(phb->hose->node);
1562 if (WARN_ON(!tbl))
1563 return;
1564
1565 iommu_register_group(&pe->table_group, phb->hose->global_number,
1566 pe->pe_number);
1567 pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);

	/* Grab a 32-bit TCE table */
1570 pe_info(pe, "DMA weight %d (%d), assigned (%d) %d DMA32 segments\n",
1571 weight, total_weight, base, segs);
1572 pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
1573 base * PNV_IODA1_DMA32_SEGSIZE,
1574 (base + segs) * PNV_IODA1_DMA32_SEGSIZE - 1);

	/*
	 * Allocate the physically contiguous TCE storage for these segments.
	 * Each 256MB DMA32 segment takes 512KB of 8-byte TCEs (one per 4KB
	 * IOMMU page), hence the segment size shifted right by
	 * (IOMMU_PAGE_SHIFT_4K - 3).
	 */
1584 tce32_segsz = PNV_IODA1_DMA32_SEGSIZE >> (IOMMU_PAGE_SHIFT_4K - 3);
1585 tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
1586 get_order(tce32_segsz * segs));
1587 if (!tce_mem) {
1588 pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
1589 goto fail;
1590 }
1591 addr = page_address(tce_mem);
1592 memset(addr, 0, tce32_segsz * segs);

	/* Configure HW */
1595 for (i = 0; i < segs; i++) {
1596 rc = opal_pci_map_pe_dma_window(phb->opal_id,
1597 pe->pe_number,
1598 base + i, 1,
1599 __pa(addr) + tce32_segsz * i,
1600 tce32_segsz, IOMMU_PAGE_SIZE_4K);
1601 if (rc) {
1602 pe_err(pe, " Failed to configure 32-bit TCE table, err %lld\n",
1603 rc);
1604 goto fail;
1605 }
1606 }

	/* Setup DMA32 segment mapping */
1609 for (i = base; i < base + segs; i++)
1610 phb->ioda.dma32_segmap[i] = pe->pe_number;

	/* Setup linux iommu table */
1613 pnv_pci_setup_iommu_table(tbl, addr, tce32_segsz * segs,
1614 base * PNV_IODA1_DMA32_SEGSIZE,
1615 IOMMU_PAGE_SHIFT_4K);
1616
1617 tbl->it_ops = &pnv_ioda1_iommu_ops;
1618 pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
1619 pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
1620 if (!iommu_init_table(tbl, phb->hose->node, 0, 0))
1621 panic("Failed to initialize iommu table");
1622
1623 pe->dma_setup_done = true;
1624 return;
1625 fail:
	/* XXX Failure: Try to fallback to 64-bit only ? */
1627 if (tce_mem)
1628 __free_pages(tce_mem, get_order(tce32_segsz * segs));
1629 if (tbl) {
1630 pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
1631 iommu_tce_table_put(tbl);
1632 }
1633}
1634
1635static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
1636 int num, struct iommu_table *tbl)
1637{
1638 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1639 table_group);
1640 struct pnv_phb *phb = pe->phb;
1641 int64_t rc;
1642 const unsigned long size = tbl->it_indirect_levels ?
1643 tbl->it_level_size : tbl->it_size;
1644 const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
1645 const __u64 win_size = tbl->it_size << tbl->it_page_shift;
1646
1647 pe_info(pe, "Setting up window#%d %llx..%llx pg=%lx\n",
1648 num, start_addr, start_addr + win_size - 1,
1649 IOMMU_PAGE_SIZE(tbl));

	/*
	 * Map TCE table through TVT. The TVE index is the PE number
	 * shifted by 1 bit for 32-bits DMA space.
	 */
1655 rc = opal_pci_map_pe_dma_window(phb->opal_id,
1656 pe->pe_number,
1657 (pe->pe_number << 1) + num,
1658 tbl->it_indirect_levels + 1,
1659 __pa(tbl->it_base),
1660 size << 3,
1661 IOMMU_PAGE_SIZE(tbl));
1662 if (rc) {
1663 pe_err(pe, "Failed to configure TCE table, err %lld\n", rc);
1664 return rc;
1665 }
1666
1667 pnv_pci_link_table_and_group(phb->hose->node, num,
1668 tbl, &pe->table_group);
1669 pnv_pci_ioda2_tce_invalidate_pe(pe);
1670
1671 return 0;
1672}
1673
1674static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
1675{
	uint16_t window_id = (pe->pe_number << 1) + 1;
1677 int64_t rc;
1678
1679 pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
1680 if (enable) {
1681 phys_addr_t top = memblock_end_of_DRAM();
1682
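		/* The bypass window must cover all of RAM, rounded up to a power of two */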
1683 top = roundup_pow_of_two(top);
1684 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
1685 pe->pe_number,
1686 window_id,
1687 pe->tce_bypass_base,
1688 top);
1689 } else {
1690 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
1691 pe->pe_number,
1692 window_id,
1693 pe->tce_bypass_base,
1694 0);
1695 }
1696 if (rc)
1697 pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
1698 else
1699 pe->tce_bypass_enabled = enable;
1700}
1701
1702static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
1703 int num, __u32 page_shift, __u64 window_size, __u32 levels,
1704 bool alloc_userspace_copy, struct iommu_table **ptbl)
1705{
1706 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1707 table_group);
1708 int nid = pe->phb->hose->node;
1709 __u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start;
1710 long ret;
1711 struct iommu_table *tbl;
1712
1713 tbl = pnv_pci_table_alloc(nid);
1714 if (!tbl)
1715 return -ENOMEM;
1716
1717 tbl->it_ops = &pnv_ioda2_iommu_ops;
1718
1719 ret = pnv_pci_ioda2_table_alloc_pages(nid,
1720 bus_offset, page_shift, window_size,
1721 levels, alloc_userspace_copy, tbl);
1722 if (ret) {
1723 iommu_tce_table_put(tbl);
1724 return ret;
1725 }
1726
1727 *ptbl = tbl;
1728
1729 return 0;
1730}
1731
1732static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
1733{
1734 struct iommu_table *tbl = NULL;
1735 long rc;
1736 unsigned long res_start, res_end;

	/*
	 * crashkernel= specifies the kdump kernel's maximum memory at
	 * some offset and there is no guarantee the result is a power
	 * of 2, which will cause errors later.
	 */
1743 const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());

	/*
	 * In memory constrained environments, e.g. kdump kernel, the
	 * DMA window can be larger than available memory, which will
	 * cause errors later.
	 */
1750 const u64 maxblock = 1UL << (PAGE_SHIFT + MAX_ORDER - 1);

	/*
	 * We create the default window as big as we can. The constraint is
	 * the max order of allocation possible. The TCE table is likely to
	 * end up being multilevel and with on-demand allocation in place,
	 * the initial use is not going to be huge as the default window aims
	 * to support crippled devices (i.e. not fully 64bit DMAble) only.
	 */
	/* iommu_table::it_map uses 1 bit per IOMMU page, hence 8 */
1760 const u64 window_size = min((maxblock * 8) << PAGE_SHIFT, max_memory);
1761
1762 unsigned long tces_order = ilog2(window_size >> PAGE_SHIFT);
1763 unsigned long tcelevel_order = ilog2(maxblock >> 3);
1764 unsigned int levels = tces_order / tcelevel_order;
1765
1766 if (tces_order % tcelevel_order)
1767 levels += 1;

	/*
	 * We try to stick to default levels (which is >1 at the moment) in
	 * order to save memory by relying on on-demand TCE level allocation.
	 */
1772 levels = max_t(unsigned int, levels, POWERNV_IOMMU_DEFAULT_LEVELS);
1773
1774 rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, PAGE_SHIFT,
1775 window_size, levels, false, &tbl);
1776 if (rc) {
1777 pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
1778 rc);
1779 return rc;
1780 }

	/* We use top part of 32bit space for MMIO so exclude it from DMA */
1783 res_start = 0;
1784 res_end = 0;
1785 if (window_size > pe->phb->ioda.m32_pci_base) {
1786 res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift;
1787 res_end = min(window_size, SZ_4G) >> tbl->it_page_shift;
1788 }
1789
1790 if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end))
1791 rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
1792 else
1793 rc = -ENOMEM;
1794 if (rc) {
1795 pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", rc);
1796 iommu_tce_table_put(tbl);
1797 tbl = NULL;
1798 }
1799 if (!pnv_iommu_bypass_disabled)
1800 pnv_pci_ioda2_set_bypass(pe, true);

	/*
	 * Set table base for the case of IOMMU DMA use. Usually this is done
	 * from dma_dev_setup() which is not called when a device is returned
	 * from VFIO so do it here.
	 */
1807 if (pe->pdev)
1808 set_iommu_table_base(&pe->pdev->dev, tbl);
1809
1810 return 0;
1811}
1812
1813static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
1814 int num)
1815{
1816 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1817 table_group);
1818 struct pnv_phb *phb = pe->phb;
1819 long ret;
1820
1821 pe_info(pe, "Removing DMA window #%d\n", num);
1822
1823 ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
1824 (pe->pe_number << 1) + num,
1825 0, 0,
1826 0, 0);
1827 if (ret)
1828 pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
1829 else
1830 pnv_pci_ioda2_tce_invalidate_pe(pe);
1831
1832 pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
1833
1834 return ret;
1835}
1836
1837#ifdef CONFIG_IOMMU_API
1838unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
1839 __u64 window_size, __u32 levels)
1840{
1841 unsigned long bytes = 0;
1842 const unsigned window_shift = ilog2(window_size);
1843 unsigned entries_shift = window_shift - page_shift;
1844 unsigned table_shift = entries_shift + 3;
1845 unsigned long tce_table_size = max(0x1000UL, 1UL << table_shift);
1846 unsigned long direct_table_size;
1847
1848 if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS) ||
1849 !is_power_of_2(window_size))
1850 return 0;

	/* Calculate a direct table size from window_size and levels */
1853 entries_shift = (entries_shift + levels - 1) / levels;
1854 table_shift = entries_shift + 3;
1855 table_shift = max_t(unsigned, table_shift, PAGE_SHIFT);
1856 direct_table_size = 1UL << table_shift;
1857
1858 for ( ; levels; --levels) {
1859 bytes += ALIGN(tce_table_size, direct_table_size);
1860
1861 tce_table_size /= direct_table_size;
1862 tce_table_size <<= 3;
1863 tce_table_size = max_t(unsigned long,
1864 tce_table_size, direct_table_size);
1865 }
1866
	return bytes + bytes; /* one for HW table, one for userspace copy */
1868}
1869
1870static long pnv_pci_ioda2_create_table_userspace(
1871 struct iommu_table_group *table_group,
1872 int num, __u32 page_shift, __u64 window_size, __u32 levels,
1873 struct iommu_table **ptbl)
1874{
1875 long ret = pnv_pci_ioda2_create_table(table_group,
1876 num, page_shift, window_size, levels, true, ptbl);
1877
1878 if (!ret)
1879 (*ptbl)->it_allocated_size = pnv_pci_ioda2_get_table_size(
1880 page_shift, window_size, levels);
1881 return ret;
1882}
1883
1884static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
1885{
1886 struct pci_dev *dev;
1887
1888 list_for_each_entry(dev, &bus->devices, bus_list) {
1889 set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
1890 dev->dev.archdata.dma_offset = pe->tce_bypass_base;
1891
1892 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
1893 pnv_ioda_setup_bus_dma(pe, dev->subordinate);
1894 }
1895}
1896
1897static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
1898{
1899 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1900 table_group);
	/* Store @tbl as pnv_pci_ioda2_unset_window() resets it */
1902 struct iommu_table *tbl = pe->table_group.tables[0];
1903
1904 pnv_pci_ioda2_set_bypass(pe, false);
1905 pnv_pci_ioda2_unset_window(&pe->table_group, 0);
1906 if (pe->pbus)
1907 pnv_ioda_setup_bus_dma(pe, pe->pbus);
1908 else if (pe->pdev)
1909 set_iommu_table_base(&pe->pdev->dev, NULL);
1910 iommu_tce_table_put(tbl);
1911}
1912
1913static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
1914{
1915 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
1916 table_group);
1917
1918 pnv_pci_ioda2_setup_default_config(pe);
1919 if (pe->pbus)
1920 pnv_ioda_setup_bus_dma(pe, pe->pbus);
1921}
1922
1923static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
1924 .get_table_size = pnv_pci_ioda2_get_table_size,
1925 .create_table = pnv_pci_ioda2_create_table_userspace,
1926 .set_window = pnv_pci_ioda2_set_window,
1927 .unset_window = pnv_pci_ioda2_unset_window,
1928 .take_ownership = pnv_ioda2_take_ownership,
1929 .release_ownership = pnv_ioda2_release_ownership,
1930};
1931#endif
1932
1933void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
1934 struct pnv_ioda_pe *pe)
1935{
1936 int64_t rc;

	/* TVE #1 is selected by PCI address bit 59 */
1939 pe->tce_bypass_base = 1ull << 59;

	/* The PE will reserve all possible 32-bits space */
1942 pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
1943 phb->ioda.m32_pci_base);

	/* Setup linux iommu table */
1946 pe->table_group.tce32_start = 0;
1947 pe->table_group.tce32_size = phb->ioda.m32_pci_base;
1948 pe->table_group.max_dynamic_windows_supported =
1949 IOMMU_TABLE_GROUP_MAX_TABLES;
1950 pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS;
1951 pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb);
1952
1953 rc = pnv_pci_ioda2_setup_default_config(pe);
1954 if (rc)
1955 return;
1956
1957#ifdef CONFIG_IOMMU_API
1958 pe->table_group.ops = &pnv_pci_ioda2_ops;
1959 iommu_register_group(&pe->table_group, phb->hose->global_number,
1960 pe->pe_number);
1961#endif
1962 pe->dma_setup_done = true;
1963}
1964
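/* On PHB3, MSI EOIs must be forwarded to firmware via this OPAL call */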
1965int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq)
1966{
1967 struct pnv_phb *phb = container_of(chip, struct pnv_phb,
1968 ioda.irq_chip);
1969
1970 return opal_pci_msi_eoi(phb->opal_id, hw_irq);
1971}
1972
1973static void pnv_ioda2_msi_eoi(struct irq_data *d)
1974{
1975 int64_t rc;
1976 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
1977 struct irq_chip *chip = irq_data_get_irq_chip(d);
1978
1979 rc = pnv_opal_pci_msi_eoi(chip, hw_irq);
1980 WARN_ON_ONCE(rc);
1981
1982 icp_native_eoi(d);
1983}
1984
1985
1986void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
1987{
1988 struct irq_data *idata;
1989 struct irq_chip *ichip;

	/* The MSI EOI OPAL call is only needed on PHB3 */
1992 if (phb->model != PNV_PHB_MODEL_PHB3)
1993 return;
1994
1995 if (!phb->ioda.irq_chip_init) {
		/*
		 * First time we setup an MSI IRQ, we need to setup the
		 * corresponding IRQ chip to route correctly.
		 */
2000 idata = irq_get_irq_data(virq);
2001 ichip = irq_data_get_irq_chip(idata);
2002 phb->ioda.irq_chip_init = 1;
2003 phb->ioda.irq_chip = *ichip;
2004 phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
2005 }
2006 irq_set_chip(virq, &phb->ioda.irq_chip);
2007}

/*
 * Returns true iff chip is something that we could call
 * pnv_opal_pci_msi_eoi() for.
 */
2013bool is_pnv_opal_msi(struct irq_chip *chip)
2014{
2015 return chip->irq_eoi == pnv_ioda2_msi_eoi;
2016}
2017EXPORT_SYMBOL_GPL(is_pnv_opal_msi);
2018
2019static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
2020 unsigned int hwirq, unsigned int virq,
2021 unsigned int is_64, struct msi_msg *msg)
2022{
2023 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
2024 unsigned int xive_num = hwirq - phb->msi_base;
2025 __be32 data;
2026 int rc;

	/* No PE assigned ? bail out ... no MSI for you ! */
2029 if (pe == NULL)
2030 return -ENXIO;

	/* Check if we have an MVE */
2033 if (pe->mve_number < 0)
2034 return -ENXIO;

	/* Force 32-bit MSI on some broken devices */
2037 if (dev->no_64bit_msi)
2038 is_64 = 0;

	/* Assign XIVE to PE */
2041 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
2042 if (rc) {
2043 pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
2044 pci_name(dev), rc, xive_num);
2045 return -EIO;
2046 }
2047
2048 if (is_64) {
2049 __be64 addr64;
2050
2051 rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
2052 &addr64, &data);
2053 if (rc) {
2054 pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
2055 pci_name(dev), rc);
2056 return -EIO;
2057 }
2058 msg->address_hi = be64_to_cpu(addr64) >> 32;
2059 msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
2060 } else {
2061 __be32 addr32;
2062
2063 rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
2064 &addr32, &data);
2065 if (rc) {
2066 pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
2067 pci_name(dev), rc);
2068 return -EIO;
2069 }
2070 msg->address_hi = 0;
2071 msg->address_lo = be32_to_cpu(addr32);
2072 }
2073 msg->data = be32_to_cpu(data);
2074
2075 pnv_set_msi_irq_chip(phb, virq);
2076
	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d), address=%x_%08x data=%x PE# %x\n",
		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);
2081
2082 return 0;
2083}
2084
2085static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
2086{
2087 unsigned int count;
2088 const __be32 *prop = of_get_property(phb->hose->dn,
2089 "ibm,opal-msi-ranges", NULL);
2090 if (!prop) {
		/* BML Fallback */
2092 prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
2093 }
2094 if (!prop)
2095 return;
2096
2097 phb->msi_base = be32_to_cpup(prop);
2098 count = be32_to_cpup(prop + 1);
2099 if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
2100 pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
2101 phb->hose->global_number);
2102 return;
2103 }
2104
2105 phb->msi_setup = pnv_pci_ioda_msi_setup;
2106 phb->msi32_support = 1;
2107 pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
2108 count, phb->msi_base);
2109}
2110
2111static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
2112 struct resource *res)
2113{
2114 struct pnv_phb *phb = pe->phb;
2115 struct pci_bus_region region;
2116 int index;
2117 int64_t rc;
2118
2119 if (!res || !res->flags || res->start > res->end)
2120 return;
2121
2122 if (res->flags & IORESOURCE_IO) {
2123 region.start = res->start - phb->ioda.io_pci_base;
2124 region.end = res->end - phb->ioda.io_pci_base;
2125 index = region.start / phb->ioda.io_segsize;
2126
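		/* Map each I/O segment spanned by the resource to this PE */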
2127 while (index < phb->ioda.total_pe_num &&
2128 region.start <= region.end) {
2129 phb->ioda.io_segmap[index] = pe->pe_number;
2130 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
2131 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
2132 if (rc != OPAL_SUCCESS) {
2133 pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n",
2134 __func__, rc, index, pe->pe_number);
2135 break;
2136 }
2137
2138 region.start += phb->ioda.io_segsize;
2139 index++;
2140 }
2141 } else if ((res->flags & IORESOURCE_MEM) &&
2142 !pnv_pci_is_m64(phb, res)) {
2143 region.start = res->start -
2144 phb->hose->mem_offset[0] -
2145 phb->ioda.m32_pci_base;
2146 region.end = res->end -
2147 phb->hose->mem_offset[0] -
2148 phb->ioda.m32_pci_base;
2149 index = region.start / phb->ioda.m32_segsize;
2150
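		/* Likewise, map each M32 segment spanned by the resource */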
2151 while (index < phb->ioda.total_pe_num &&
2152 region.start <= region.end) {
2153 phb->ioda.m32_segmap[index] = pe->pe_number;
2154 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
2155 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
2156 if (rc != OPAL_SUCCESS) {
2157 pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x",
2158 __func__, rc, index, pe->pe_number);
2159 break;
2160 }
2161
2162 region.start += phb->ioda.m32_segsize;
2163 index++;
2164 }
2165 }
2166}

/*
 * This function is supposed to be called on basis of PE from top
 * to bottom style. So the I/O or MMIO segment assigned to the
 * parent PE could be overridden by its child PEs if necessary.
 */
2173static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
2174{
2175 struct pci_dev *pdev;
2176 int i;

	/*
	 * NOTE: We only care about PCI-bus-based PEs for now. PCI
	 * device based PEs, for example SRIOV-sensitive VFs, will
	 * be figured out later.
	 */
2183 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
2184
2185 list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
2186 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
2187 pnv_ioda_setup_pe_res(pe, &pdev->resource[i]);

		/*
		 * If the PE contains all subordinate PCI buses, the
		 * windows of the child bridges should be mapped to
		 * the PE as well.
		 */
2194 if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev))
2195 continue;
2196 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
2197 pnv_ioda_setup_pe_res(pe,
2198 &pdev->resource[PCI_BRIDGE_RESOURCES + i]);
2199 }
2200}
2201
2202#ifdef CONFIG_DEBUG_FS
2203static int pnv_pci_diag_data_set(void *data, u64 val)
2204{
2205 struct pnv_phb *phb = data;
2206 s64 ret;

	/* Retrieve the diag data from firmware */
2209 ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
2210 phb->diag_data_size);
2211 if (ret != OPAL_SUCCESS)
2212 return -EIO;

	/* Print the diag data to the kernel log */
2215 pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);
2216 return 0;
2217}
2218
2219DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_diag_data_fops, NULL, pnv_pci_diag_data_set,
2220 "%llu\n");
2221
2222static int pnv_pci_ioda_pe_dump(void *data, u64 val)
2223{
2224 struct pnv_phb *phb = data;
2225 int pe_num;
2226
2227 for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
2228 struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_num];
2229
2230 if (!test_bit(pe_num, phb->ioda.pe_alloc))
2231 continue;
2232
2233 pe_warn(pe, "rid: %04x dev count: %2d flags: %s%s%s%s%s%s\n",
2234 pe->rid, pe->device_count,
2235 (pe->flags & PNV_IODA_PE_DEV) ? "dev " : "",
2236 (pe->flags & PNV_IODA_PE_BUS) ? "bus " : "",
2237 (pe->flags & PNV_IODA_PE_BUS_ALL) ? "all " : "",
2238 (pe->flags & PNV_IODA_PE_MASTER) ? "master " : "",
2239 (pe->flags & PNV_IODA_PE_SLAVE) ? "slave " : "",
2240 (pe->flags & PNV_IODA_PE_VF) ? "vf " : "");
2241 }
2242
2243 return 0;
2244}
2245
2246DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_ioda_pe_dump_fops, NULL,
2247 pnv_pci_ioda_pe_dump, "%llu\n");
2248
2249#endif
2250
2251static void pnv_pci_ioda_create_dbgfs(void)
2252{
2253#ifdef CONFIG_DEBUG_FS
2254 struct pci_controller *hose, *tmp;
2255 struct pnv_phb *phb;
2256 char name[16];
2257
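	/* One debugfs directory per PHB, e.g. "PCI0001" for domain 1 */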
2258 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
2259 phb = hose->private_data;
2260
2261 sprintf(name, "PCI%04x", hose->global_number);
2262 phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
2263
2264 debugfs_create_file_unsafe("dump_diag_regs", 0200, phb->dbgfs,
2265 phb, &pnv_pci_diag_data_fops);
2266 debugfs_create_file_unsafe("dump_ioda_pe_state", 0200, phb->dbgfs,
2267 phb, &pnv_pci_ioda_pe_dump_fops);
2268 }
2269#endif
2270}
2271
static void pnv_pci_enable_bridge(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;
	struct pci_bus *child;

	/* Empty bus ? bail */
	if (list_empty(&bus->devices))
		return;

	/*
	 * If there's a bridge associated with that bus, enable it. This
	 * works around races in the generic code when the enabling is done
	 * during parallel probing of devices below a switch: enabling the
	 * upstream bridge here, before any child is probed, ensures the
	 * children never see config cycles through a disabled bridge.
	 * Enabling an already-enabled bridge is harmless.
	 */
	if (dev) {
		int rc = pci_enable_device(dev);

		if (rc)
			pci_err(dev, "Error enabling bridge (%d)\n", rc);
		pci_set_master(dev);
	}

	/* Perform the same on child busses */
	list_for_each_entry(child, &bus->children, node)
		pnv_pci_enable_bridge(child);
}

static void pnv_pci_enable_bridges(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		pnv_pci_enable_bridge(hose->bus);
}

static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_create_dbgfs();

	pnv_pci_enable_bridges();

#ifdef CONFIG_EEH
	pnv_eeh_post_init();
#endif
}

/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented:
 * each PE owns an integral number of segments, so a bridge
 * window that may move to another PE on hotplug has to be
 * aligned to the segment size of its window type.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
	int num_pci_bridges = 0;
	struct pci_dev *bridge;

	/*
	 * Behind a second PCIe-to-PCI bridge all devices are grouped
	 * into a single PE, so the windows need no special alignment.
	 */
	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/*
	 * We fall back to M32 if M64 isn't supported. We enforce the M64
	 * alignment for any 64-bit resource, PCIe doesn't care and
	 * bridges only do 64-bit prefetchable anyway.
	 */
	if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
		return phb->ioda.m64_segsize;
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}

/*
 * We are updating the root port or the upstream port of the
 * bridge behind the root port with the PHB's windows in order
 * to accommodate the changes on required resources during
 * PCI (slot) hotplug, which is connected to either the root
 * port or the downstream ports of a PCIe switch behind the
 * root port.
 */
static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
					   unsigned long type)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dev *bridge = bus->self;
	struct resource *r, *w;
	bool msi_region = false;
	int i;

	/* Check if we need to apply the fixup to the bridge's windows */
	if (!pci_is_root_bus(bridge->bus) &&
	    !pci_is_root_bus(bridge->bus->self->bus))
		return;

	/* Fix up the bridge windows to cover the whole PHB window */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		r = &bridge->resource[PCI_BRIDGE_RESOURCES + i];
		if (!r->flags || !r->parent)
			continue;

		w = NULL;
		if (r->flags & type & IORESOURCE_IO)
			w = &hose->io_resource;
		else if (pnv_pci_is_m64(phb, r) &&
			 (type & IORESOURCE_PREFETCH) &&
			 phb->ioda.m64_segsize)
			w = &hose->mem_resources[1];
		else if (r->flags & type & IORESOURCE_MEM) {
			w = &hose->mem_resources[0];
			msi_region = true;
		}

		r->start = w->start;
		r->end = w->end;

		/*
		 * The 64KB 32-bit MSI region shouldn't be included in
		 * the 32-bit bridge window. Otherwise we can see strange
		 * issues; one of them is an EEH error observed on Garrison.
		 *
		 * Exclude the top 1MB region, which is the minimal
		 * alignment of the 32-bit bridge window.
		 */
		if (msi_region) {
			r->end += 0x10000;
			r->end -= 0x100000;
		}
	}
}

static void pnv_pci_configure_bus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct pnv_ioda_pe *pe;
	bool all = (bridge && pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);

	dev_info(&bus->dev, "Configuring PE for bus\n");

	/* Don't assign a PE to a bus without subordinate devices */
	if (WARN_ON(list_empty(&bus->devices)))
		return;

	/* Reserve PEs according to used M64 BARs */
	pnv_ioda_reserve_m64_pe(bus, NULL, all);

	/*
	 * Assign a PE. We might run here because of partial hotplug.
	 * In that case just pick up the existing PE and don't
	 * allocate resources again.
	 */
	pe = pnv_ioda_setup_bus_PE(bus, all);
	if (!pe)
		return;

	pnv_ioda_setup_pe_seg(pe);
}

static resource_size_t pnv_pci_default_alignment(void)
{
	return PAGE_SIZE;
}

/*
 * Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_dn *pdn;

	pdn = pci_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE) {
		pci_err(dev, "pci_enable_device() blocked, no PE assigned.\n");
		return false;
	}

	return true;
}

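/*
 * On OpenCAPI PHBs a device may not have a PE assigned yet when it is
 * first enabled, so set one up on demand.
 */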
static bool pnv_ocapi_enable_device_hook(struct pci_dev *dev)
{
	struct pci_dn *pdn;
	struct pnv_ioda_pe *pe;

	pdn = pci_get_pdn(dev);
	if (!pdn)
		return false;

	if (pdn->pe_number == IODA_INVALID_PE) {
		pe = pnv_ioda_setup_dev_PE(dev);
		if (!pe)
			return false;
	}
	return true;
}

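/*
 * Remove a DMA window from an IODA1 PE: clear every DMA32 segment the
 * PE owns in firmware and unlink the TCE table from the group.
 */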
static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group,
				       int num)
{
	struct pnv_ioda_pe *pe = container_of(table_group,
					      struct pnv_ioda_pe, table_group);
	struct pnv_phb *phb = pe->phb;
	unsigned int idx;
	long rc;

	pe_info(pe, "Removing DMA window #%d\n", num);
	for (idx = 0; idx < phb->ioda.dma32_count; idx++) {
		if (phb->ioda.dma32_segmap[idx] != pe->pe_number)
			continue;

		rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
						idx, 0, 0ul, 0ul, 0ul);
		if (rc != OPAL_SUCCESS) {
			pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n",
				rc, idx);
			return rc;
		}

		phb->ioda.dma32_segmap[idx] = IODA_INVALID_PE;
	}

	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
	return OPAL_SUCCESS;
}

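/* Tear down the 32-bit DMA window and TCE table of an IODA1 PE */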
static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
{
	struct iommu_table *tbl = pe->table_group.tables[0];
	int64_t rc;

	if (!pe->dma_setup_done)
		return;

	rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0);
	if (rc != OPAL_SUCCESS)
		return;

	pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		WARN_ON(pe->table_group.group);
	}

	free_pages(tbl->it_base, get_order(tbl->it_size << 3));
	iommu_tce_table_put(tbl);
}

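/* Tear down the DMA windows (TCE table and bypass) of an IODA2 PE */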
void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
{
	struct iommu_table *tbl = pe->table_group.tables[0];
	int64_t rc;

	if (!pe->dma_setup_done)
		return;

	rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
	if (rc)
		pe_warn(pe, "OPAL error %lld releasing DMA window\n", rc);

	pnv_pci_ioda2_set_bypass(pe, false);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		WARN_ON(pe->table_group.group);
	}

	iommu_tce_table_put(tbl);
}

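/*
 * Hand every MMIO/IO segment in @map that is owned by @pe back to the
 * reserved PE and invalidate the corresponding segment map entries.
 */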
static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
				 unsigned short win,
				 unsigned int *map)
{
	struct pnv_phb *phb = pe->phb;
	int idx;
	int64_t rc;

	for (idx = 0; idx < phb->ioda.total_pe_num; idx++) {
		if (map[idx] != pe->pe_number)
			continue;

		rc = opal_pci_map_pe_mmio_window(phb->opal_id,
				phb->ioda.reserved_pe_idx, win, 0, idx);
		if (rc != OPAL_SUCCESS)
			pe_warn(pe, "Error %lld unmapping (%d) segment#%d\n",
				rc, win, idx);

		map[idx] = IODA_INVALID_PE;
	}
}

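/* Release the IO and M32 segments of a PE; IODA2 only tracks M32 here */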
static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA1) {
		pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE,
				     phb->ioda.io_segmap);
		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
				     phb->ioda.m32_segmap);
	} else if (phb->type == PNV_PHB_IODA2) {
		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
				     phb->ioda.m32_segmap);
	}
}

static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;
	struct pnv_ioda_pe *slave, *tmp;

	pe_info(pe, "Releasing PE\n");

	mutex_lock(&phb->ioda.pe_list_mutex);
	list_del(&pe->list);
	mutex_unlock(&phb->ioda.pe_list_mutex);

	switch (phb->type) {
	case PNV_PHB_IODA1:
		pnv_pci_ioda1_release_pe_dma(pe);
		break;
	case PNV_PHB_IODA2:
		pnv_pci_ioda2_release_pe_dma(pe);
		break;
	case PNV_PHB_NPU_OCAPI:
		break;
	default:
		WARN_ON(1);
	}

	pnv_ioda_release_pe_seg(pe);
	pnv_ioda_deconfigure_pe(pe->phb, pe);

	/* Release slave PEs in the compound PE */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
			list_del(&slave->list);
			pnv_ioda_free_pe(slave);
		}
	}

	/*
	 * The PE for the root bus can be removed because of hotplug in EEH
	 * recovery for a fenced PHB error. We need to mark the PE dead so
	 * that it can be populated again on the PCI hot-add path. The PE
	 * shouldn't be destroyed as it's a globally reserved resource.
	 */
	if (phb->ioda.root_pe_idx == pe->pe_number)
		return;

	pnv_ioda_free_pe(pe);
}

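/*
 * Called on device removal: drop the device's reference on its PE and
 * release the PE once its last device is gone.
 */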
static void pnv_pci_release_device(struct pci_dev *pdev)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/* The VF PE state is torn down when sriov_disable() is called */
	if (pdev->is_virtfn)
		return;

	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

#ifdef CONFIG_PCI_IOV
	/*
	 * FIXME: Try to move this to sriov_disable(). It's here since we
	 * allocate the iov state at probe time, because we need to fix up
	 * the VF BAR sizes before setting up the VFs.
	 */
	if (pdev->is_physfn)
		kfree(pdev->dev.archdata.iov_data);
#endif

	/*
	 * PCI hotplug can happen as part of EEH error recovery. The @pdn
	 * isn't removed and re-added afterwards in that scenario, so reset
	 * the PE number in @pdn to an invalid one. Otherwise the PE's
	 * device count is decremented on device removal without being
	 * incremented again on re-add, and the resulting imbalance
	 * eventually breaks the normal PCI hotplug path.
	 */
	pe = &phb->ioda.pe_array[pdn->pe_number];
	pdn->pe_number = IODA_INVALID_PE;

	WARN_ON(--pe->device_count < 0);
	if (pe->device_count == 0)
		pnv_ioda_release_pe(pe);
}

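/* Reset the IODA tables at shutdown/kexec so the next kernel starts clean */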
static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;

	opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
		       OPAL_ASSERT_RESET);
}

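/*
 * PCI hotplug may recreate the struct pci_bus; repoint the owning
 * bus-type PE at the new bus so pe->pbus stays valid.
 */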
static void pnv_pci_ioda_dma_bus_setup(struct pci_bus *bus)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
	struct pnv_ioda_pe *pe;

	list_for_each_entry(pe, &phb->ioda.pe_list, list) {
		if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
			continue;

		if (!pe->pbus)
			continue;

		if (bus->number == ((pe->rid >> 8) & 0xFF)) {
			pe->pbus = bus;
			break;
		}
	}
}

static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
	.dma_dev_setup		= pnv_pci_ioda_dma_dev_setup,
	.dma_bus_setup		= pnv_pci_ioda_dma_bus_setup,
	.iommu_bypass_supported	= pnv_pci_ioda_iommu_bypass_supported,
	.setup_msi_irqs		= pnv_setup_msi_irqs,
	.teardown_msi_irqs	= pnv_teardown_msi_irqs,
	.enable_device_hook	= pnv_pci_enable_device_hook,
	.release_device		= pnv_pci_release_device,
	.window_alignment	= pnv_pci_window_alignment,
	.setup_bridge		= pnv_pci_fixup_bridge_resources,
	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
	.shutdown		= pnv_pci_ioda_shutdown,
};

static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = {
	.enable_device_hook	= pnv_ocapi_enable_device_hook,
	.release_device		= pnv_pci_release_device,
	.window_alignment	= pnv_pci_window_alignment,
	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
	.shutdown		= pnv_pci_ioda_shutdown,
};

static void __init pnv_pci_init_ioda_phb(struct device_node *np,
					 u64 hub_id, int ioda_type)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	unsigned long size, m64map_off, m32map_off, pemap_off;
	unsigned long iomap_off = 0, dma32map_off = 0;
	struct pnv_ioda_pe *root_pe;
	struct resource r;
	const __be64 *prop64;
	const __be32 *prop32;
	int len;
	unsigned int segno;
	u64 phb_id;
	void *aux;
	long rc;

	if (!of_device_is_available(np))
		return;

	pr_info("Initializing %s PHB (%pOF)\n", pnv_phb_names[ioda_type], np);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);

	phb = kzalloc(sizeof(*phb), GFP_KERNEL);
	if (!phb)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*phb));

	/* Allocate PCI controller */
	phb->hose = hose = pcibios_alloc_controller(np);
	if (!phb->hose) {
		pr_err("  Can't allocate PCI controller for %pOF\n",
		       np);
		kfree(phb);
		return;
	}


	spin_lock_init(&phb->lock);
	prop32 = of_get_property(np, "bus-range", &len);
	if (prop32 && len == 8) {
		hose->first_busno = be32_to_cpu(prop32[0]);
		hose->last_busno = be32_to_cpu(prop32[1]);
	} else {
		pr_warn("  Broken <bus-range> on %pOF\n", np);
		hose->first_busno = 0;
		hose->last_busno = 0xff;
	}
	hose->private_data = phb;
	phb->hub_id = hub_id;
	phb->opal_id = phb_id;
	phb->type = ioda_type;
	mutex_init(&phb->ioda.pe_alloc_mutex);

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;

	/* Initialize diagnostic data buffer */
	prop32 = of_get_property(np, "ibm,phb-diag-data-size", NULL);
	if (prop32)
		phb->diag_data_size = be32_to_cpup(prop32);
	else
		phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;

	phb->diag_data = kzalloc(phb->diag_data_size, GFP_KERNEL);
	if (!phb->diag_data)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      phb->diag_data_size);

	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);

	/* Get registers */
	if (!of_address_to_resource(np, 0, &r)) {
		phb->regs_phys = r.start;
		phb->regs = ioremap(r.start, resource_size(&r));
		if (phb->regs == NULL)
			pr_err("  Failed to map registers !\n");
	}

	/* Initialize more IODA stuff */
	phb->ioda.total_pe_num = 1;
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (prop32)
		phb->ioda.total_pe_num = be32_to_cpup(prop32);
	prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
	if (prop32)
		phb->ioda.reserved_pe_idx = be32_to_cpup(prop32);

	/* Invalidate RID to PE# mapping */
	for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++)
		phb->ioda.pe_rmap[segno] = IODA_INVALID_PE;

	/* Parse 64-bit MMIO range */
	pnv_ioda_parse_m64_window(phb);

	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* Firmware has already carved the top 64K (MSI space) out of M32 */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe_num;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num;
	phb->ioda.io_pci_base = 0;

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.dma32_count = phb->ioda.m32_pci_base /
				PNV_IODA1_DMA32_SEGSIZE;

	/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
	size = ALIGN(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
		     sizeof(unsigned long));
	m64map_off = size;
	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]);
	m32map_off = size;
	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]);
	if (phb->type == PNV_PHB_IODA1) {
		iomap_off = size;
		size += phb->ioda.total_pe_num * sizeof(phb->ioda.io_segmap[0]);
		dma32map_off = size;
		size += phb->ioda.dma32_count *
			sizeof(phb->ioda.dma32_segmap[0]);
	}
	pemap_off = size;
	size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
	aux = kzalloc(size, GFP_KERNEL);
	if (!aux)
		panic("%s: Failed to allocate %lu bytes\n", __func__, size);

	phb->ioda.pe_alloc = aux;
	phb->ioda.m64_segmap = aux + m64map_off;
	phb->ioda.m32_segmap = aux + m32map_off;
	for (segno = 0; segno < phb->ioda.total_pe_num; segno++) {
		phb->ioda.m64_segmap[segno] = IODA_INVALID_PE;
		phb->ioda.m32_segmap[segno] = IODA_INVALID_PE;
	}
	if (phb->type == PNV_PHB_IODA1) {
		phb->ioda.io_segmap = aux + iomap_off;
		for (segno = 0; segno < phb->ioda.total_pe_num; segno++)
			phb->ioda.io_segmap[segno] = IODA_INVALID_PE;

		phb->ioda.dma32_segmap = aux + dma32map_off;
		for (segno = 0; segno < phb->ioda.dma32_count; segno++)
			phb->ioda.dma32_segmap[segno] = IODA_INVALID_PE;
	}
	phb->ioda.pe_array = aux + pemap_off;

	/*
	 * Choose the PE number for the root bus, which shouldn't have
	 * M64 resources consumed by its child devices. Pick the PE
	 * number adjacent to the reserved one if possible.
	 */
	pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx);
	if (phb->ioda.reserved_pe_idx == 0) {
		phb->ioda.root_pe_idx = 1;
		pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
	} else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) {
		phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1;
		pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
	} else {
		/* Otherwise just allocate one */
		root_pe = pnv_ioda_alloc_pe(phb, 1);
		phb->ioda.root_pe_idx = root_pe->pe_number;
	}

	INIT_LIST_HEAD(&phb->ioda.pe_list);
	mutex_init(&phb->ioda.pe_list_mutex);

#if 0
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info("  %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe_num, phb->ioda.reserved_pe_idx,
		phb->ioda.m32_size, phb->ioda.m32_segsize);
	if (phb->ioda.m64_size)
		pr_info("                 M64: 0x%lx [segment=0x%lx]\n",
			phb->ioda.m64_size, phb->ioda.m64_segsize);
	if (phb->ioda.io_size)
		pr_info("                  IO: 0x%x [segment=0x%x]\n",
			phb->ioda.io_size, phb->ioda.io_segsize);

	phb->hose->ops = &pnv_pci_ops;
	phb->get_pe_state = pnv_ioda_get_pe_state;
	phb->freeze_pe = pnv_ioda_freeze_pe;
	phb->unfreeze_pe = pnv_ioda_unfreeze_pe;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * to let the PCI core do resource assignment. It's supposed
	 * that the PCI core will do correct I/O and MMIO alignment
	 * for the P2P bridge bars so that each PCI bus (excluding
	 * the child P2P bridges) can form an individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;

	switch (phb->type) {
	case PNV_PHB_NPU_OCAPI:
		hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops;
		break;
	default:
		hose->controller_ops = pnv_pci_ioda_controller_ops;
	}

	ppc_md.pcibios_default_alignment = pnv_pci_default_alignment;

#ifdef CONFIG_PCI_IOV
	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov;
	ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
	ppc_md.pcibios_sriov_enable = pnv_pcibios_sriov_enable;
	ppc_md.pcibios_sriov_disable = pnv_pcibios_sriov_disable;
#endif

	pci_add_flags(PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
	if (rc)
		pr_warn("  OPAL Error %ld performing IODA table reset !\n", rc);

	/*
	 * If we're running in a kdump kernel, the previous kernel never
	 * shut down PCI devices correctly. We already got the IODA table
	 * cleaned out, so we have to issue a PHB reset to stop all PCI
	 * transactions from the previous kernel. The ppc_pci_reset_phbs
	 * kernel parameter forces this reset too. Additionally, if the
	 * IODA reset above failed, use the bigger hammer as well; that
	 * can happen if we get a PHB fatal error in very early boot.
	 */
	if (is_kdump_kernel() || pci_reset_phbs || rc) {
		pr_info("  Issue PHB reset ...\n");
		pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
		pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
	}

	/* Configure M64 window */
	if (!phb->init_m64 || phb->init_m64(phb))
		hose->mem_resources[1].flags = 0;

	/* Create pci_dn's for the DNs in this PHB */
	pci_devs_phb_init_dynamic(hose);
}

void __init pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}

void __init pnv_pci_init_npu2_opencapi_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_OCAPI);
}

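/*
 * Devices behind an OpenCAPI NPU implement the 4KB extended config
 * space even though they don't appear as PCIe to the generic probe,
 * so force the config space size up front.
 */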
static void pnv_npu2_opencapi_cfg_size_fixup(struct pci_dev *dev)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);

	if (!machine_is(powernv))
		return;

	if (phb->type == PNV_PHB_NPU_OCAPI)
		dev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pnv_npu2_opencapi_cfg_size_fixup);

void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const __be64 *prop64;
	u64 hub_id;

	pr_info("Probing IODA IO-Hub %pOF\n", np);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
	}
}