1
2
3
4
5
6
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/pci.h>
11#include <linux/bitfield.h>
12#include <linux/clk.h>
13#include <linux/delay.h>
14#include <linux/gpio.h>
15#include <linux/init.h>
16#include <linux/mbus.h>
17#include <linux/slab.h>
18#include <linux/platform_device.h>
19#include <linux/of_address.h>
20#include <linux/of_irq.h>
21#include <linux/of_gpio.h>
22#include <linux/of_pci.h>
23#include <linux/of_platform.h>
24
25#include "../pci.h"
26#include "../pci-bridge-emul.h"
27
28
29
30
/*
 * PCIe unit register offsets, relative to the per-port register window
 * mapped at mvebu_pcie_port->base.
 */
#define PCIE_DEV_ID_OFF		0x0000
#define PCIE_CMD_OFF		0x0004
#define PCIE_DEV_REV_OFF	0x0008
#define PCIE_BAR_LO_OFF(n)	(0x0010 + ((n) << 3))
#define PCIE_BAR_HI_OFF(n)	(0x0014 + ((n) << 3))
#define PCIE_SSDEV_ID_OFF	0x002c
/* Offset of the PCIe Capability structure inside the port registers */
#define PCIE_CAP_PCIEXP		0x0060
/* Offset of the AER (error reporting) extended capability registers */
#define PCIE_CAP_PCIERR_OFF	0x0100
#define PCIE_BAR_CTRL_OFF(n)	(0x1804 + (((n) - 1) * 4))
/* Address decoding windows 0-4 share one layout; window 5 is separate */
#define PCIE_WIN04_CTRL_OFF(n)	(0x1820 + ((n) << 4))
#define PCIE_WIN04_BASE_OFF(n)	(0x1824 + ((n) << 4))
#define PCIE_WIN04_REMAP_OFF(n)	(0x182c + ((n) << 4))
#define PCIE_WIN5_CTRL_OFF	0x1880
#define PCIE_WIN5_BASE_OFF	0x1884
#define PCIE_WIN5_REMAP_OFF	0x188c
/* Indirect configuration space access (address/data register pair) */
#define PCIE_CONF_ADDR_OFF	0x18f8
#define  PCIE_CONF_ADDR_EN		0x80000000
#define  PCIE_CONF_REG(r)		((((r) & 0xf00) << 16) | ((r) & 0xfc))
#define  PCIE_CONF_BUS(b)		(((b) & 0xff) << 16)
#define  PCIE_CONF_DEV(d)		(((d) & 0x1f) << 11)
#define  PCIE_CONF_FUNC(f)		(((f) & 0x7) << 8)
#define  PCIE_CONF_ADDR(bus, devfn, where) \
	(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn))    | \
	 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
	 PCIE_CONF_ADDR_EN)
#define PCIE_CONF_DATA_OFF	0x18fc
/* Interrupt cause/unmask registers (unmask: bit set = source enabled) */
#define PCIE_INT_CAUSE_OFF	0x1900
#define PCIE_INT_UNMASK_OFF	0x1910
#define  PCIE_INT_INTX(i)		BIT(24+(i))
#define  PCIE_INT_PM_PME		BIT(28)
#define  PCIE_INT_ALL_MASK		GENMASK(31, 0)
#define PCIE_CTRL_OFF		0x1a00
#define  PCIE_CTRL_X1_MODE		0x0001
#define  PCIE_CTRL_RC_MODE		BIT(1)
#define  PCIE_CTRL_MASTER_HOT_RESET	BIT(24)
#define PCIE_STAT_OFF		0x1a04
#define  PCIE_STAT_BUS			0xff00
#define  PCIE_STAT_DEV			0x1f0000
#define  PCIE_STAT_LINK_DOWN		BIT(0)
/* Slot Power Limit register (value/scale as in PCI_EXP_SLTCAP) */
#define PCIE_SSPL_OFF		0x1a0c
#define  PCIE_SSPL_VALUE_SHIFT		0
#define  PCIE_SSPL_VALUE_MASK		GENMASK(7, 0)
#define  PCIE_SSPL_SCALE_SHIFT		8
#define  PCIE_SSPL_SCALE_MASK		GENMASK(9, 8)
#define  PCIE_SSPL_ENABLE		BIT(16)
#define PCIE_RC_RTSTA		0x1a14
#define PCIE_DEBUG_CTRL		0x1a60
#define  PCIE_DEBUG_SOFT_RESET		BIT(20)
79
80struct mvebu_pcie_port;
81
82
/* Structure representing the whole PCIe controller (all ports). */
struct mvebu_pcie {
	struct platform_device *pdev;
	struct mvebu_pcie_port *ports;	/* array of nports entries */
	struct resource io;		/* I/O aperture */
	struct resource realio;		/* NOTE(review): presumably the CPU-visible I/O range — confirm against probe code */
	struct resource mem;		/* memory aperture */
	struct resource busn;		/* bus number range */
	int nports;			/* number of entries in ports[] */
};
92
/* One address-decoding window: CPU base, bus-side remap address, size. */
struct mvebu_pcie_window {
	phys_addr_t base;
	phys_addr_t remap;
	size_t size;
};
98
99
/* Structure representing one PCIe port (one root port). */
struct mvebu_pcie_port {
	char *name;
	void __iomem *base;			/* per-port register window */
	u32 port;				/* "marvell,pcie-port" DT value */
	u32 lane;				/* "marvell,pcie-lane" DT value */
	bool is_x4;				/* true when DT "num-lanes" == 4 */
	int devfn;				/* devfn of this port on bus 0 */
	unsigned int mem_target;		/* MBus target for the mem window */
	unsigned int mem_attr;			/* MBus attribute for the mem window */
	unsigned int io_target;			/* MBus target for the I/O window, -1 if none */
	unsigned int io_attr;			/* MBus attribute for the I/O window, -1 if none */
	struct clk *clk;
	struct gpio_desc *reset_gpio;		/* optional reset (PERST#) GPIO */
	char *reset_name;
	struct pci_bridge_emul bridge;		/* emulated PCI-to-PCI bridge config space */
	struct device_node *dn;
	struct mvebu_pcie *pcie;		/* back-pointer to the controller */
	struct mvebu_pcie_window memwin;	/* currently programmed memory window */
	struct mvebu_pcie_window iowin;		/* currently programmed I/O window */
	u32 saved_pcie_stat;			/* PCIE_STAT_OFF, saved across suspend */
	struct resource regs;			/* physical range of the port registers */
	u8 slot_power_limit_value;		/* PCI_EXP_SLTCAP_SPLV value (0 = none) */
	u8 slot_power_limit_scale;		/* PCI_EXP_SLTCAP_SPLS value */
	struct irq_domain *intx_irq_domain;	/* domain for the four legacy INTx */
	raw_spinlock_t irq_lock;		/* protects PCIE_INT_UNMASK_OFF RMW */
	int intx_irq;				/* optional dedicated "intx" interrupt */
};
127
/* Write @val to the port register at offset @reg. */
static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
{
	writel(val, port->base + reg);
}
132
/* Read the port register at offset @reg. */
static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
{
	return readl(port->base + reg);
}
137
/* True when an MBus target/attribute pair was found for the I/O window. */
static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
{
	return port->io_target != -1 && port->io_attr != -1;
}
142
/* Report link state from the status register (bit clear = link up). */
static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
{
	return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
}
147
148static u8 mvebu_pcie_get_local_bus_nr(struct mvebu_pcie_port *port)
149{
150 return (mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_BUS) >> 8;
151}
152
153static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
154{
155 u32 stat;
156
157 stat = mvebu_readl(port, PCIE_STAT_OFF);
158 stat &= ~PCIE_STAT_BUS;
159 stat |= nr << 8;
160 mvebu_writel(port, stat, PCIE_STAT_OFF);
161}
162
163static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
164{
165 u32 stat;
166
167 stat = mvebu_readl(port, PCIE_STAT_OFF);
168 stat &= ~PCIE_STAT_DEV;
169 stat |= nr << 16;
170 mvebu_writel(port, stat, PCIE_STAT_OFF);
171}
172
173static void mvebu_pcie_disable_wins(struct mvebu_pcie_port *port)
174{
175 int i;
176
177 mvebu_writel(port, 0, PCIE_BAR_LO_OFF(0));
178 mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
179
180 for (i = 1; i < 3; i++) {
181 mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
182 mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
183 mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
184 }
185
186 for (i = 0; i < 5; i++) {
187 mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
188 mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
189 mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
190 }
191
192 mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
193 mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
194 mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
195}
196
197
198
199
200
201
202
203
/*
 * Set up the address-decoding windows so the PCIe unit can reach DRAM,
 * and point BAR[0]/BAR[1] at the internal registers and DRAM banks.
 */
static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
{
	const struct mbus_dram_target_info *dram;
	u32 size;
	int i;

	dram = mv_mbus_dram_info();

	/* First, disable and clear BARs and windows. */
	mvebu_pcie_disable_wins(port);

	/* Setup windows for DDR banks. Count total DDR size on the fly. */
	size = 0;
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvebu_writel(port, cs->base & 0xffff0000,
			     PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
		/* size-1, attribute, target and the enable bit */
		mvebu_writel(port,
			     ((cs->size - 1) & 0xffff0000) |
			     (cs->mbus_attr << 8) |
			     (dram->mbus_dram_target_id << 4) | 1,
			     PCIE_WIN04_CTRL_OFF(i));

		size += cs->size;
	}

	/* Round up 'size' to the nearest power of two. */
	if ((size & (size - 1)) != 0)
		size = 1 << fls(size);

	/* Setup BAR[1] to cover all DRAM banks. */
	mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
	mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
		     PCIE_BAR_CTRL_OFF(1));

	/*
	 * Point BAR[0] at the port's internal registers, aligned down to
	 * 1 MiB (the BAR granularity).
	 */
	mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
}
248
/* Bring one port into a sane Root Complex configuration. */
static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
	u32 ctrl, lnkcap, cmd, dev_rev, unmask, sspl;

	/* Setup PCIe controller to Root Complex mode. */
	ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
	ctrl |= PCIE_CTRL_RC_MODE;
	mvebu_writel(port, ctrl, PCIE_CTRL_OFF);

	/*
	 * Set Maximum Link Width in the Link Capabilities register to the
	 * number of SerDes lanes (1 or 4). The PCIe spec defines this field
	 * as read-only, but on this controller it is read-write and must be
	 * programmed correctly — NOTE(review): the link may otherwise fail
	 * to train; confirm against the Marvell functional spec.
	 */
	lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
	lnkcap |= (port->is_x4 ? 4 : 1) << 4;
	mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);

	/* Disable Root Bridge I/O space, memory space and bus mastering. */
	cmd = mvebu_readl(port, PCIE_CMD_OFF);
	cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	mvebu_writel(port, cmd, PCIE_CMD_OFF);

	/*
	 * Change the class code of the device to PCI-to-PCI bridge
	 * (PCI_CLASS_BRIDGE_PCI_NORMAL); the power-on default advertised by
	 * the hardware is not a bridge class, which would prevent the PCI
	 * core from treating the port as a bridge. Only the class/revision
	 * bits above the revision byte are replaced.
	 */
	dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
	dev_rev &= ~0xffffff00;
	dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
	mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF);

	/* Point PCIe unit MBUS decode windows to DRAM space. */
	mvebu_pcie_setup_wins(port);

	/*
	 * Program the Slot Power Limit. When no limit was specified in DT,
	 * the enable bit stays clear and no limit is advertised.
	 */
	sspl = mvebu_readl(port, PCIE_SSPL_OFF);
	sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
	if (port->slot_power_limit_value) {
		sspl |= port->slot_power_limit_value << PCIE_SSPL_VALUE_SHIFT;
		sspl |= port->slot_power_limit_scale << PCIE_SSPL_SCALE_SHIFT;
		sspl |= PCIE_SSPL_ENABLE;
	}
	mvebu_writel(port, sspl, PCIE_SSPL_OFF);

	/* Mask all interrupt sources. */
	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);

	/* Clear all interrupt causes. */
	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);

	/* If a dedicated "intx" interrupt is available, we are done. */
	if (port->intx_irq > 0)
		return;

	/*
	 * Fallback: no per-INTx interrupt line. Unmask all four INTx sources
	 * so they are reported through the summary interrupt; individual
	 * masking is then not possible (see the warning in the port parse
	 * code).
	 */
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask |= PCIE_INT_INTX(0) | PCIE_INT_INTX(1) |
		  PCIE_INT_INTX(2) | PCIE_INT_INTX(3);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
}
343
344static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
345 struct pci_bus *bus,
346 int devfn);
347
/*
 * Configuration-space read for devices behind a port. The access goes
 * through the indirect PCIE_CONF_ADDR / PCIE_CONF_DATA register pair
 * of the port that owns the bus.
 */
static int mvebu_pcie_child_rd_conf(struct pci_bus *bus, u32 devfn, int where,
				    int size, u32 *val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;
	void __iomem *conf_data;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Accesses while the link is down cannot complete. */
	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	conf_data = port->base + PCIE_CONF_DATA_OFF;

	/* Latch the target bus/devfn/register into the address register. */
	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		*val = readb_relaxed(conf_data + (where & 3));
		break;
	case 2:
		*val = readw_relaxed(conf_data + (where & 2));
		break;
	case 4:
		*val = readl_relaxed(conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
383
/*
 * Configuration-space write for devices behind a port; counterpart of
 * mvebu_pcie_child_rd_conf().
 */
static int mvebu_pcie_child_wr_conf(struct pci_bus *bus, u32 devfn,
				    int where, int size, u32 val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;
	void __iomem *conf_data;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Accesses while the link is down cannot complete. */
	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	conf_data = port->base + PCIE_CONF_DATA_OFF;

	/* Latch the target bus/devfn/register into the address register. */
	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		writeb(val, conf_data + (where & 3));
		break;
	case 2:
		writew(val, conf_data + (where & 2));
		break;
	case 4:
		writel(val, conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
419
/* Config accessors for everything below the emulated root bridge. */
static struct pci_ops mvebu_pcie_child_ops = {
	.read = mvebu_pcie_child_rd_conf,
	.write = mvebu_pcie_child_wr_conf,
};
424
425
426
427
428
429static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
430 phys_addr_t base, size_t size)
431{
432 while (size) {
433 size_t sz = 1 << (fls(size) - 1);
434
435 mvebu_mbus_del_window(base, sz);
436 base += sz;
437 size -= sz;
438 }
439}
440
441
442
443
444
445
446
/*
 * MBus windows can only have a power-of-two size, but PCI bridge
 * windows have no such constraint, so the requested range is split
 * into power-of-two chunks, largest first. On failure all windows
 * created so far are torn down again.
 */
static int mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
				  unsigned int target, unsigned int attribute,
				  phys_addr_t base, size_t size,
				  phys_addr_t remap)
{
	size_t size_mapped = 0;

	while (size) {
		size_t sz = 1 << (fls(size) - 1);
		int ret;

		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
							sz, remap);
		if (ret) {
			phys_addr_t end = base + sz - 1;

			dev_err(&port->pcie->pdev->dev,
				"Could not create MBus window at [mem %pa-%pa]: %d\n",
				&base, &end, ret);
			/* Unwind the windows already created. */
			mvebu_pcie_del_windows(port, base - size_mapped,
					       size_mapped);
			return ret;
		}

		size -= sz;
		size_mapped += sz;
		base += sz;
		/* Keep the remap address in lockstep unless remapping is off. */
		if (remap != MVEBU_MBUS_NO_REMAP)
			remap += sz;
	}

	return 0;
}
480
/*
 * Reprogram one window (memory or I/O) from its current state @cur to
 * the state @desired, tearing down and re-creating the MBus windows as
 * needed. @cur is updated to reflect the programmed state.
 */
static int mvebu_pcie_set_window(struct mvebu_pcie_port *port,
				 unsigned int target, unsigned int attribute,
				 const struct mvebu_pcie_window *desired,
				 struct mvebu_pcie_window *cur)
{
	int ret;

	/* Nothing to do when the window is already as requested. */
	if (desired->base == cur->base && desired->remap == cur->remap &&
	    desired->size == cur->size)
		return 0;

	if (cur->size != 0) {
		mvebu_pcie_del_windows(port, cur->base, cur->size);
		cur->size = 0;
		cur->base = 0;
		/*
		 * mvebu_pcie_del_windows() has no return value, so a
		 * failed removal cannot be detected here; we proceed
		 * assuming the window is gone.
		 */
	}

	/* A zero-sized desired window simply means "disabled". */
	if (desired->size == 0)
		return 0;

	ret = mvebu_pcie_add_windows(port, target, attribute, desired->base,
				     desired->size, desired->remap);
	if (ret) {
		cur->size = 0;
		cur->base = 0;
		return ret;
	}

	*cur = *desired;
	return 0;
}
518
/* Re-program the I/O window after the emulated bridge I/O base/limit changed. */
static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new iobase/iolimit values invalid? Then disable the window. */
	if (conf->iolimit < conf->iobase ||
	    conf->iolimitupper < conf->iobaseupper)
		return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
					     &desired, &port->iowin);

	/*
	 * Compute the window from the emulated PCI-to-PCI bridge registers,
	 * per the bridge specification: the bus-side address ("remap") is
	 * built from iobase[7:4]:iobaseupper, the CPU-side base is that
	 * offset into the controller's I/O aperture, and the size comes
	 * from the 4 KiB-granular limit registers.
	 */
	desired.remap = ((conf->iobase & 0xF0) << 8) |
			(conf->iobaseupper << 16);
	desired.base = port->pcie->io.start + desired.remap;
	desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
			 (conf->iolimitupper << 16)) -
			desired.remap) +
		       1;

	return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
				     &port->iowin);
}
548
/* Re-program the memory window after the emulated bridge mem base/limit changed. */
static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new membase/memlimit values invalid? Then disable the window. */
	if (conf->memlimit < conf->membase)
		return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
					     &desired, &port->memwin);

	/*
	 * Compute the window from the emulated PCI-to-PCI bridge registers:
	 * base from membase[15:4] (1 MiB granularity), size from the
	 * inclusive 1 MiB-granular limit. No remapping is needed for
	 * memory space.
	 */
	desired.base = ((conf->membase & 0xFFF0) << 16);
	desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
		       desired.base + 1;

	return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
				     &port->memwin);
}
572
/*
 * Read hook for the base (type 1 header) part of the emulated bridge
 * config space. Registers with hardware backing are read from the port;
 * everything else falls through to the emulation buffer.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_COMMAND:
		*value = mvebu_readl(port, PCIE_CMD_OFF);
		break;

	case PCI_PRIMARY_BUS: {
		/*
		 * From this 32-bit register only the secondary bus number
		 * (the mvebu local bus number) is backed by hardware; the
		 * other bytes come from the emulated config buffer.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_PRIMARY_BUS / 4]);
		val &= ~0xff00;
		val |= mvebu_pcie_get_local_bus_nr(port) << 8;
		*value = val;
		break;
	}

	case PCI_INTERRUPT_LINE: {
		/*
		 * From this 32-bit register only PCI_BRIDGE_CTL_BUS_RESET is
		 * backed by hardware (PCIE_CTRL_MASTER_HOT_RESET); the other
		 * bits come from the emulated config buffer.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
		if (mvebu_readl(port, PCIE_CTRL_OFF) & PCIE_CTRL_MASTER_HOT_RESET)
			val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
		else
			val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
		*value = val;
		break;
	}

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
620
/*
 * Read hook for the PCIe capability part of the emulated bridge config
 * space; most registers are read straight from the hardware capability
 * at PCIE_CAP_PCIEXP.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCAP:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
		break;

	case PCI_EXP_DEVCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCAP:
		/*
		 * Mask out the Clock Power Management bit (the feature is not
		 * supported — see the LNKCTL write hook) and force Data Link
		 * Layer Link Active Reporting capable, since the LNKSTA read
		 * below synthesizes the DLLLA bit from the link state.
		 */
		*value = (mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
			  ~PCI_EXP_LNKCAP_CLKPM) | PCI_EXP_LNKCAP_DLLLARC;
		break;

	case PCI_EXP_LNKCTL:
		/* The upper 16 bits are LNKSTA; report DLLLA from link state. */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL) |
			 (mvebu_pcie_link_up(port) ?
			  (PCI_EXP_LNKSTA_DLLLA << 16) : 0);
		break;

	case PCI_EXP_SLTCTL: {
		u16 slotctl = le16_to_cpu(bridge->pcie_conf.slotctl);
		u16 slotsta = le16_to_cpu(bridge->pcie_conf.slotsta);
		u32 val = 0;

		/*
		 * The ASPL_DISABLE bit is emulated: when a slot power limit
		 * was configured, it reflects the PCIE_SSPL_ENABLE hardware
		 * bit; otherwise the value stored in the emulation buffer is
		 * returned.
		 */
		if (!port->slot_power_limit_value)
			val |= slotctl & PCI_EXP_SLTCTL_ASPL_DISABLE;
		else if (!(mvebu_readl(port, PCIE_SSPL_OFF) & PCIE_SSPL_ENABLE))
			val |= PCI_EXP_SLTCTL_ASPL_DISABLE;

		/* Upper 16 bits carry the Slot Status register. */
		val |= slotsta << 16;
		*value = val;
		break;
	}

	case PCI_EXP_RTSTA:
		*value = mvebu_readl(port, PCIE_RC_RTSTA);
		break;

	case PCI_EXP_DEVCAP2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP2);
		break;

	case PCI_EXP_DEVCTL2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
		break;

	case PCI_EXP_LNKCTL2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
695
/*
 * Read hook for the extended config space of the emulated bridge.
 * The listed AER registers (including the capability header at
 * offset 0) are read directly from the hardware block at
 * PCIE_CAP_PCIERR_OFF.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
				    int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case 0:
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG+0:
	case PCI_ERR_HEADER_LOG+4:
	case PCI_ERR_HEADER_LOG+8:
	case PCI_ERR_HEADER_LOG+12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_STATUS:
	case PCI_ERR_ROOT_ERR_SRC:
		*value = mvebu_readl(port, PCIE_CAP_PCIERR_OFF + reg);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
726
/*
 * Write hook for the base (type 1 header) part of the emulated bridge
 * config space: propagates window and bus-number changes to hardware.
 */
static void
mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;
	struct pci_bridge_emul_conf *conf = &bridge->conf;

	switch (reg) {
	case PCI_COMMAND:
		mvebu_writel(port, new, PCIE_CMD_OFF);
		break;

	case PCI_IO_BASE:
		if ((mask & 0xffff) && mvebu_has_ioport(port) &&
		    mvebu_pcie_handle_iobase_change(port)) {
			/* On failure, mark the window disabled (base > limit). */
			conf->iobase &= ~0xf0;
			conf->iolimit &= ~0xf0;
			conf->iobase |= 0xf0;
			conf->iobaseupper = cpu_to_le16(0x0000);
			conf->iolimitupper = cpu_to_le16(0x0000);
		}
		break;

	case PCI_MEMORY_BASE:
		if (mvebu_pcie_handle_membase_change(port)) {
			/* On failure, mark the window disabled (base > limit). */
			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) & ~0xfff0);
			conf->memlimit = cpu_to_le16(le16_to_cpu(conf->memlimit) & ~0xfff0);
			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) | 0xfff0);
		}
		break;

	case PCI_IO_BASE_UPPER16:
		if (mvebu_has_ioport(port) &&
		    mvebu_pcie_handle_iobase_change(port)) {
			/* On failure, mark the window disabled (base > limit). */
			conf->iobase &= ~0xf0;
			conf->iolimit &= ~0xf0;
			conf->iobase |= 0xf0;
			conf->iobaseupper = cpu_to_le16(0x0000);
			conf->iolimitupper = cpu_to_le16(0x0000);
		}
		break;

	case PCI_PRIMARY_BUS:
		/* Only the secondary bus number byte is backed by hardware. */
		if (mask & 0xff00)
			mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
		break;

	case PCI_INTERRUPT_LINE:
		/* Bridge Control's BUS_RESET maps to MASTER_HOT_RESET. */
		if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
			u32 ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
			if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
				ctrl |= PCIE_CTRL_MASTER_HOT_RESET;
			else
				ctrl &= ~PCIE_CTRL_MASTER_HOT_RESET;
			mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
		}
		break;

	default:
		break;
	}
}
792
/*
 * Write hook for the PCIe capability part of the emulated bridge
 * config space: forwards writes to the hardware capability registers.
 */
static void
mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCTL:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCTL:
		/*
		 * Clock Power Management is not supported (LNKCAP read hook
		 * also masks the capability bit), so never let the enable
		 * bit reach hardware.
		 */
		new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_SLTCTL:
		/*
		 * The ASPL_DISABLE bit is emulated via PCIE_SSPL_ENABLE,
		 * and only meaningful when a slot power limit was
		 * configured.
		 */
		if ((mask & PCI_EXP_SLTCTL_ASPL_DISABLE) &&
		    port->slot_power_limit_value) {
			u32 sspl = mvebu_readl(port, PCIE_SSPL_OFF);
			if (new & PCI_EXP_SLTCTL_ASPL_DISABLE)
				sspl &= ~PCIE_SSPL_ENABLE;
			else
				sspl |= PCIE_SSPL_ENABLE;
			mvebu_writel(port, sspl, PCIE_SSPL_OFF);
		}
		break;

	case PCI_EXP_RTSTA:
		/*
		 * The PME Status bit in PCIE_RC_RTSTA is read-only; it can
		 * only be cleared by writing 0 to the PM_PME bit of the RW0C
		 * interrupt-cause register.
		 */
		if (new & PCI_EXP_RTSTA_PME)
			mvebu_writel(port, ~PCIE_INT_PM_PME, PCIE_INT_CAUSE_OFF);
		break;

	case PCI_EXP_DEVCTL2:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
		break;

	case PCI_EXP_LNKCTL2:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
		break;

	default:
		break;
	}
}
854
/*
 * Write hook for the extended (AER) config space: forwards writes to
 * the hardware block at PCIE_CAP_PCIERR_OFF.
 */
static void
mvebu_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
				     int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	/* These are RW1C registers; write back only the bits being cleared. */
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_ROOT_STATUS:
		new &= mask;
		fallthrough;

	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG+0:
	case PCI_ERR_HEADER_LOG+4:
	case PCI_ERR_HEADER_LOG+8:
	case PCI_ERR_HEADER_LOG+12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_ERR_SRC:
		mvebu_writel(port, new, PCIE_CAP_PCIERR_OFF + reg);
		break;

	default:
		break;
	}
}
886
/* Hardware-backed accessors plugged into the generic bridge emulation. */
static const struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
	.read_base = mvebu_pci_bridge_emul_base_conf_read,
	.write_base = mvebu_pci_bridge_emul_base_conf_write,
	.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
	.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
	.read_ext = mvebu_pci_bridge_emul_ext_conf_read,
	.write_ext = mvebu_pci_bridge_emul_ext_conf_write,
};
895
896
897
898
899
/*
 * Initialize the emulated PCI-to-PCI bridge for one port: seed the
 * config space from the hardware IDs and register the hardware-backed
 * accessors.
 */
static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
{
	unsigned int bridge_flags = PCI_BRIDGE_EMUL_NO_PREFMEM_FORWARD;
	struct pci_bridge_emul *bridge = &port->bridge;
	u32 dev_id = mvebu_readl(port, PCIE_DEV_ID_OFF);
	u32 dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
	u32 ssdev_id = mvebu_readl(port, PCIE_SSDEV_ID_OFF);
	u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
	u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);

	bridge->conf.vendor = cpu_to_le16(dev_id & 0xffff);
	bridge->conf.device = cpu_to_le16(dev_id >> 16);
	/* Only the revision byte; the class code is emulated as a bridge. */
	bridge->conf.class_revision = cpu_to_le32(dev_rev & 0xff);

	if (mvebu_has_ioport(port)) {
		/* We support 32-bit I/O addressing. */
		bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
		bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
	} else {
		/* No I/O window available; disable I/O forwarding entirely. */
		bridge_flags |= PCI_BRIDGE_EMUL_NO_IO_FORWARD;
	}

	/*
	 * Advertise the hardware's PCIe capability version and the presence
	 * of slot registers (the slot is emulated below).
	 */
	bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver | PCI_EXP_FLAGS_SLOT);

	/*
	 * Slot capabilities: advertise the DT-provided slot power limit
	 * (zero when unspecified) and use port+1 as the physical slot
	 * number, since port numbers start at zero. Presence Detect State
	 * is set permanently as there is no hotplug detection support.
	 */
	bridge->pcie_conf.slotcap = cpu_to_le32(
		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, port->slot_power_limit_value) |
		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, port->slot_power_limit_scale) |
		FIELD_PREP(PCI_EXP_SLTCAP_PSN, port->port+1));
	bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);

	bridge->subsystem_vendor_id = ssdev_id & 0xffff;
	bridge->subsystem_id = ssdev_id >> 16;
	bridge->has_pcie = true;
	bridge->data = port;
	bridge->ops = &mvebu_pci_bridge_emul_ops;

	return pci_bridge_emul_init(bridge, bridge_flags);
}
954
/* Retrieve the controller from the ARM pci_sys_data private pointer. */
static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
959
960static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
961 struct pci_bus *bus,
962 int devfn)
963{
964 int i;
965
966 for (i = 0; i < pcie->nports; i++) {
967 struct mvebu_pcie_port *port = &pcie->ports[i];
968
969 if (!port->base)
970 continue;
971
972 if (bus->number == 0 && port->devfn == devfn)
973 return port;
974 if (bus->number != 0 &&
975 bus->number >= port->bridge.conf.secondary_bus &&
976 bus->number <= port->bridge.conf.subordinate_bus)
977 return port;
978 }
979
980 return NULL;
981}
982
983
984static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
985 int where, int size, u32 val)
986{
987 struct mvebu_pcie *pcie = bus->sysdata;
988 struct mvebu_pcie_port *port;
989
990 port = mvebu_pcie_find_port(pcie, bus, devfn);
991 if (!port)
992 return PCIBIOS_DEVICE_NOT_FOUND;
993
994 return pci_bridge_emul_conf_write(&port->bridge, where, size, val);
995}
996
997
998static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
999 int size, u32 *val)
1000{
1001 struct mvebu_pcie *pcie = bus->sysdata;
1002 struct mvebu_pcie_port *port;
1003
1004 port = mvebu_pcie_find_port(pcie, bus, devfn);
1005 if (!port)
1006 return PCIBIOS_DEVICE_NOT_FOUND;
1007
1008 return pci_bridge_emul_conf_read(&port->bridge, where, size, val);
1009}
1010
/* Config accessors for the root bus (the emulated bridges). */
static struct pci_ops mvebu_pcie_ops = {
	.read = mvebu_pcie_rd_conf,
	.write = mvebu_pcie_wr_conf,
};
1015
/* Mask one INTx source; the RMW on the unmask register is lock-protected. */
static void mvebu_pcie_intx_irq_mask(struct irq_data *d)
{
	struct mvebu_pcie_port *port = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 unmask;

	raw_spin_lock_irqsave(&port->irq_lock, flags);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask &= ~PCIE_INT_INTX(hwirq);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}
1029
/* Unmask one INTx source; the RMW on the unmask register is lock-protected. */
static void mvebu_pcie_intx_irq_unmask(struct irq_data *d)
{
	struct mvebu_pcie_port *port = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 unmask;

	raw_spin_lock_irqsave(&port->irq_lock, flags);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask |= PCIE_INT_INTX(hwirq);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}
1043
/* irq_chip for the per-port legacy INTx interrupts. */
static struct irq_chip intx_irq_chip = {
	.name = "mvebu-INTx",
	.irq_mask = mvebu_pcie_intx_irq_mask,
	.irq_unmask = mvebu_pcie_intx_irq_unmask,
};
1049
/* Map one INTx hwirq into the domain as a level-triggered interrupt. */
static int mvebu_pcie_intx_irq_map(struct irq_domain *h,
				   unsigned int virq, irq_hw_number_t hwirq)
{
	struct mvebu_pcie_port *port = h->host_data;

	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &intx_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, port);

	return 0;
}
1061
/* Domain ops for the INTx domain; DT cells are plain hwirq numbers. */
static const struct irq_domain_ops mvebu_pcie_intx_irq_domain_ops = {
	.map = mvebu_pcie_intx_irq_map,
	.xlate = irq_domain_xlate_onecell,
};
1066
/*
 * Create the linear INTx IRQ domain for a port from its interrupt
 * controller child node.
 */
static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
{
	struct device *dev = &port->pcie->pdev->dev;
	struct device_node *pcie_intc_node;

	raw_spin_lock_init(&port->irq_lock);

	/* The interrupt controller is expected to be the first child node. */
	pcie_intc_node = of_get_next_child(port->dn, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found for %s\n", port->name);
		return -ENODEV;
	}

	port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						      &mvebu_pcie_intx_irq_domain_ops,
						      port);
	of_node_put(pcie_intc_node);
	if (!port->intx_irq_domain) {
		dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
		return -ENOMEM;
	}

	return 0;
}
1091
/*
 * Chained handler for the per-port "intx" interrupt: demultiplex the
 * pending, unmasked INTx sources into the port's INTx domain.
 */
static void mvebu_pcie_irq_handler(struct irq_desc *desc)
{
	struct mvebu_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = &port->pcie->pdev->dev;
	u32 cause, unmask, status;
	int i;

	chained_irq_enter(chip, desc);

	/* Only sources that are both pending and unmasked are serviced. */
	cause = mvebu_readl(port, PCIE_INT_CAUSE_OFF);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	status = cause & unmask;

	/* Process legacy INTx interrupts */
	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (!(status & PCIE_INT_INTX(i)))
			continue;

		if (generic_handle_domain_irq(port->intx_irq_domain, i) == -EINVAL)
			dev_err_ratelimited(dev, "unexpected INT%c IRQ\n", (char)i+'A');
	}

	chained_irq_exit(chip, desc);
}
1117
1118static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
1119{
1120
1121 if (dev->bus->number == 0)
1122 return 0;
1123
1124 return of_irq_parse_and_map_pci(dev, slot, pin);
1125}
1126
/*
 * Alignment hook for resources assigned to the emulated bridges on
 * bus 0: bridge windows are backed by MBus windows, which must be
 * power-of-two sized and aligned, with minimum granularities of
 * 64 KiB (I/O) and 1 MiB (memory).
 */
static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
						 const struct resource *res,
						 resource_size_t start,
						 resource_size_t size,
						 resource_size_t align)
{
	/* Devices behind the bridges need no special alignment. */
	if (dev->bus->number != 0)
		return start;

	/*
	 * Round the start up so the window is aligned on its own
	 * (power-of-two-rounded-down) size, but never less than the
	 * minimum granularity above.
	 */
	if (res->flags & IORESOURCE_IO)
		return round_up(start, max_t(resource_size_t, SZ_64K,
					     rounddown_pow_of_two(size)));
	else if (res->flags & IORESOURCE_MEM)
		return round_up(start, max_t(resource_size_t, SZ_1M,
					     rounddown_pow_of_two(size)));
	else
		return start;
}
1156
1157static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
1158 struct device_node *np,
1159 struct mvebu_pcie_port *port)
1160{
1161 int ret = 0;
1162
1163 ret = of_address_to_resource(np, 0, &port->regs);
1164 if (ret)
1165 return (void __iomem *)ERR_PTR(ret);
1166
1167 return devm_ioremap_resource(&pdev->dev, &port->regs);
1168}
1169
/* Helpers to decode one entry of the DT "ranges" property. */
#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)
#define    DT_TYPE_IO 0x1
#define    DT_TYPE_MEM32 0x2
#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
1175
/*
 * Walk the controller node's "ranges" property and extract the MBus
 * target/attribute pair matching the given slot and resource type
 * (IORESOURCE_IO or IORESOURCE_MEM). Returns -ENOENT when no entry
 * matches.
 */
static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
			      unsigned long type,
			      unsigned int *tgt,
			      unsigned int *attr)
{
	/* PCI address cells (na) and size cells (ns) of the ranges entries */
	const int na = 3, ns = 2;
	const __be32 *range;
	int rlen, nranges, rangesz, pna, i;

	*tgt = -1;
	*attr = -1;

	range = of_get_property(np, "ranges", &rlen);
	if (!range)
		return -EINVAL;

	pna = of_n_addr_cells(np);
	rangesz = pna + na + ns;
	nranges = rlen / sizeof(__be32) / rangesz;

	for (i = 0; i < nranges; i++, range += rangesz) {
		u32 flags = of_read_number(range, 1);
		u32 slot = of_read_number(range + 1, 1);
		u64 cpuaddr = of_read_number(range + na, pna);
		unsigned long rtype;

		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
			rtype = IORESOURCE_IO;
		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
			rtype = IORESOURCE_MEM;
		else
			continue;

		/* Target/attribute are packed in the top CPU address bits. */
		if (slot == PCI_SLOT(devfn) && type == rtype) {
			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
			return 0;
		}
	}

	return -ENOENT;
}
1218
1219static int mvebu_pcie_suspend(struct device *dev)
1220{
1221 struct mvebu_pcie *pcie;
1222 int i;
1223
1224 pcie = dev_get_drvdata(dev);
1225 for (i = 0; i < pcie->nports; i++) {
1226 struct mvebu_pcie_port *port = pcie->ports + i;
1227 if (!port->base)
1228 continue;
1229 port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
1230 }
1231
1232 return 0;
1233}
1234
1235static int mvebu_pcie_resume(struct device *dev)
1236{
1237 struct mvebu_pcie *pcie;
1238 int i;
1239
1240 pcie = dev_get_drvdata(dev);
1241 for (i = 0; i < pcie->nports; i++) {
1242 struct mvebu_pcie_port *port = pcie->ports + i;
1243 if (!port->base)
1244 continue;
1245 mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
1246 mvebu_pcie_setup_hw(port);
1247 }
1248
1249 return 0;
1250}
1251
/* devm action callback: release the reference on the port clock. */
static void mvebu_pcie_port_clk_put(void *data)
{
	struct mvebu_pcie_port *port = data;

	clk_put(port->clk);
}
1258
/*
 * Parse one PCIe port child node of the controller and populate @port.
 *
 * Returns 1 when the port is usable, 0 when the port must be skipped
 * (non-fatal problems; port->name/reset_name are freed and NULLed so
 * later loops ignore the port), or a negative errno on fatal failures
 * (-ENOMEM, or -EPROBE_DEFER when a dependency is not ready yet).
 */
static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
		 struct mvebu_pcie_port *port, struct device_node *child)
{
	struct device *dev = &pcie->pdev->dev;
	enum of_gpio_flags flags;
	u32 slot_power_limit;
	int reset_gpio, ret;
	u32 num_lanes;

	port->pcie = pcie;

	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
		dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
			 child);
		goto skip;
	}

	/* "marvell,pcie-lane" is optional and defaults to lane 0. */
	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
		port->lane = 0;

	/* "num-lanes" == 4 selects x4 mode; any other value keeps x1. */
	if (!of_property_read_u32(child, "num-lanes", &num_lanes) && num_lanes == 4)
		port->is_x4 = true;

	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
				    port->lane);
	if (!port->name) {
		ret = -ENOMEM;
		goto err;
	}

	port->devfn = of_pci_get_devfn(child);
	if (port->devfn < 0)
		goto skip;
	/* Only function 0 is accepted for a root port node. */
	if (PCI_FUNC(port->devfn) != 0) {
		dev_err(dev, "%s: invalid function number, must be zero\n",
			port->name);
		goto skip;
	}

	/* A MEM window target/attribute pair is mandatory for the port. */
	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
				 &port->mem_target, &port->mem_attr);
	if (ret < 0) {
		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
			port->name);
		goto skip;
	}

	/* The I/O window is only meaningful when an I/O aperture exists. */
	if (resource_size(&pcie->io) != 0) {
		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
				   &port->io_target, &port->io_attr);
	} else {
		port->io_target = -1;
		port->io_attr = -1;
	}

	/*
	 * Old DT bindings do not provide an "intx" interrupt, so its absence
	 * is not fatal; legacy INTx then cannot be masked per-source (see
	 * the warning below).
	 */
	port->intx_irq = of_irq_get_byname(child, "intx");
	if (port->intx_irq == -EPROBE_DEFER) {
		ret = port->intx_irq;
		goto err;
	}
	if (port->intx_irq <= 0) {
		dev_warn(dev, "%s: legacy INTx interrupts cannot be masked individually, "
			      "%pOF does not contain intx interrupt\n",
			 port->name, child);
	}

	reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
	if (reset_gpio == -EPROBE_DEFER) {
		ret = reset_gpio;
		goto err;
	}

	if (gpio_is_valid(reset_gpio)) {
		unsigned long gpio_flags;

		port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
						  port->name);
		if (!port->reset_name) {
			ret = -ENOMEM;
			goto err;
		}

		/* Request the GPIO with reset initially asserted. */
		if (flags & OF_GPIO_ACTIVE_LOW) {
			dev_info(dev, "%pOF: reset gpio is active low\n",
				 child);
			gpio_flags = GPIOF_ACTIVE_LOW |
				     GPIOF_OUT_INIT_LOW;
		} else {
			gpio_flags = GPIOF_OUT_INIT_HIGH;
		}

		ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
					    port->reset_name);
		if (ret) {
			/* Defer is fatal; other errors just disable the port. */
			if (ret == -EPROBE_DEFER)
				goto err;
			goto skip;
		}

		port->reset_gpio = gpio_to_desc(reset_gpio);
	}

	/*
	 * Optional slot power limit; the value is divided by 1000 for the
	 * integral watts, so it is presumably in milliwatts — the helper's
	 * documentation should confirm.
	 */
	slot_power_limit = of_pci_get_slot_power_limit(child,
				&port->slot_power_limit_value,
				&port->slot_power_limit_scale);
	if (slot_power_limit)
		dev_info(dev, "%s: Slot power limit %u.%uW\n",
			 port->name,
			 slot_power_limit / 1000,
			 (slot_power_limit / 100) % 10);

	port->clk = of_clk_get_by_name(child, NULL);
	if (IS_ERR(port->clk)) {
		dev_err(dev, "%s: cannot get clock\n", port->name);
		goto skip;
	}

	/* Ensure the clock reference is dropped on driver detach. */
	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
	if (ret < 0) {
		clk_put(port->clk);
		goto err;
	}

	return 1;

skip:
	ret = 0;

	/* When skipping, free the names so this port reads as unused. */
	devm_kfree(dev, port->reset_name);
	port->reset_name = NULL;
	devm_kfree(dev, port->name);
	port->name = NULL;

err:
	return ret;
}
1400
1401
1402
1403
1404
1405
/*
 * Power up a PCIe port: enable its clock and, when a PERST# gpio is wired,
 * release the reset after the required settling delays.
 */
static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
{
	int ret;

	ret = clk_prepare_enable(port->clk);
	if (ret < 0)
		return ret;

	if (port->reset_gpio) {
		/* Default reset hold time: the PCI D3cold->D0 delay, in µs. */
		u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;

		/* DT may override how long reset stays asserted. */
		of_property_read_u32(port->dn, "reset-delay-us",
				     &reset_udelay);

		/* Let the reference clock settle before deasserting reset. */
		udelay(100);

		gpiod_set_value_cansleep(port->reset_gpio, 0);
		msleep(reset_udelay / 1000);
	}

	return 0;
}
1428
1429
1430
1431
1432
/*
 * Power down a PCIe port: assert reset first, then gate the clock —
 * the reverse order of mvebu_pcie_powerup().
 */
static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
{
	gpiod_set_value_cansleep(port->reset_gpio, 1);

	clk_disable_unprepare(port->clk);
}
1439
1440
1441
1442
1443
1444
/*
 * Obtain the MEM and I/O apertures exported by the MBus layer, claim them
 * in the resource trees and attach them to the host bridge windows.
 * Returns 0 on success or a negative errno.
 */
static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	int ret;

	/* Get the PCIe memory aperture; an empty one is a fatal error. */
	mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
	if (resource_size(&pcie->mem) == 0) {
		dev_err(dev, "invalid memory aperture size\n");
		return -EINVAL;
	}

	pcie->mem.name = "PCI MEM";
	pci_add_resource(&bridge->windows, &pcie->mem);
	ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
	if (ret)
		return ret;

	/* Get the PCIe I/O aperture; it is optional and may be empty. */
	mvebu_mbus_get_pcie_io_aperture(&pcie->io);

	if (resource_size(&pcie->io) != 0) {
		/*
		 * realio is the CPU-side I/O port range the aperture is
		 * remapped into, capped below IO_SPACE_LIMIT.
		 * NOTE(review): the SZ_64K headroom presumably reserves one
		 * final 64K-aligned slot — confirm against the iospace code.
		 */
		pcie->realio.flags = pcie->io.flags;
		pcie->realio.start = PCIBIOS_MIN_IO;
		pcie->realio.end = min_t(resource_size_t,
					 IO_SPACE_LIMIT - SZ_64K,
					 resource_size(&pcie->io) - 1);
		pcie->realio.name = "PCI I/O";

		ret = devm_pci_remap_iospace(dev, &pcie->realio, pcie->io.start);
		if (ret)
			return ret;

		pci_add_resource(&bridge->windows, &pcie->realio);
		ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
		if (ret)
			return ret;
	}

	return 0;
}
1487
/*
 * Probe: allocate the host bridge, claim the MEM/IO apertures, parse each
 * available child port node, power up and initialize the parsed ports, then
 * register the PCI host.  Per-port failures are non-fatal: the port is left
 * disabled (port->base == NULL) and probing continues with the next one.
 */
static int mvebu_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mvebu_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct device_node *np = dev->of_node;
	struct device_node *child;
	int num, i, ret;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	/* Claim apertures and populate the bridge windows. */
	ret = mvebu_pcie_parse_request_resources(pcie);
	if (ret)
		return ret;

	num = of_get_available_child_count(np);

	pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
	if (!pcie->ports)
		return -ENOMEM;

	/* First pass: parse DT; only successfully parsed ports are kept. */
	i = 0;
	for_each_available_child_of_node(np, child) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		ret = mvebu_pcie_parse_port(pcie, port, child);
		if (ret < 0) {
			of_node_put(child);
			return ret;
		} else if (ret == 0) {
			/* Port skipped (missing/invalid properties). */
			continue;
		}

		port->dn = child;
		i++;
	}
	pcie->nports = i;

	/* Second pass: power up and configure each parsed port. */
	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];
		int irq = port->intx_irq;

		child = port->dn;
		if (!child)
			continue;

		/* On powerup failure, leave the port disabled and go on. */
		ret = mvebu_pcie_powerup(port);
		if (ret < 0)
			continue;

		port->base = mvebu_pcie_map_registers(pdev, child, port);
		if (IS_ERR(port->base)) {
			dev_err(dev, "%s: cannot map registers\n", port->name);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		/* Emulated PCI bridge backing config space for this port. */
		ret = mvebu_pci_bridge_emul_init(port);
		if (ret < 0) {
			dev_err(dev, "%s: cannot init emulated bridge\n",
				port->name);
			devm_iounmap(dev, port->base);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		/* Per-port INTx domain only when a dedicated "intx" IRQ exists. */
		if (irq > 0) {
			ret = mvebu_pcie_init_irq_domain(port);
			if (ret) {
				dev_err(dev, "%s: cannot init irq domain\n",
					port->name);
				pci_bridge_emul_cleanup(&port->bridge);
				devm_iounmap(dev, port->base);
				port->base = NULL;
				mvebu_pcie_powerdown(port);
				continue;
			}
			irq_set_chained_handler_and_data(irq,
							 mvebu_pcie_irq_handler,
							 port);
		}

		/*
		 * Program the port's windows/interrupt configuration, then
		 * set the local device number to 1 and local bus number to 0.
		 * NOTE(review): this looks intended to keep the Root Port from
		 * claiming config cycles aimed at devfn 0 of the link bus,
		 * matching the emulated bus-0 topology above — confirm against
		 * the Marvell functional specification.
		 */
		mvebu_pcie_setup_hw(port);
		mvebu_pcie_set_local_dev_nr(port, 1);
		mvebu_pcie_set_local_bus_nr(port, 0);
	}

	bridge->sysdata = pcie;
	bridge->ops = &mvebu_pcie_ops;
	bridge->child_ops = &mvebu_pcie_child_ops;
	bridge->align_resource = mvebu_pcie_align_resource;
	bridge->map_irq = mvebu_pcie_map_irq;

	return pci_host_probe(bridge);
}
1666
/* Tear down the host bridge and quiesce every active port. */
static int mvebu_pcie_remove(struct platform_device *pdev)
{
	struct mvebu_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u32 cmd, sspl;
	int i;

	/* Remove the PCI bus together with all its devices. */
	pci_lock_rescan_remove();
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	pci_unlock_rescan_remove();

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];
		int irq = port->intx_irq;

		if (!port->base)
			continue;

		/* Disable memory, I/O and bus-master responses. */
		cmd = mvebu_readl(port, PCIE_CMD_OFF);
		cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		mvebu_writel(port, cmd, PCIE_CMD_OFF);

		/* Mask all interrupt sources. */
		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);

		/* Clear all pending interrupt causes. */
		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);

		/* Detach the chained INTx handler installed at probe. */
		if (irq > 0)
			irq_set_chained_handler_and_data(irq, NULL, NULL);

		/* Remove the per-port INTx IRQ domain, if one was created. */
		if (port->intx_irq_domain)
			irq_domain_remove(port->intx_irq_domain);

		/* Release the emulated root bridge config space. */
		pci_bridge_emul_cleanup(&port->bridge);

		/* Stop emitting Set_Slot_Power_Limit PCIe messages. */
		sspl = mvebu_readl(port, PCIE_SSPL_OFF);
		sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
		mvebu_writel(port, sspl, PCIE_SSPL_OFF);

		/* Disable configured BARs and address decode windows. */
		mvebu_pcie_disable_wins(port);

		/* Delete the MBus windows backing the I/O and MEM ranges. */
		if (port->iowin.size)
			mvebu_pcie_del_windows(port, port->iowin.base, port->iowin.size);
		if (port->memwin.size)
			mvebu_pcie_del_windows(port, port->memwin.base, port->memwin.size);

		/* Assert reset and gate the port clock. */
		mvebu_pcie_powerdown(port);
	}

	return 0;
}
1728
1729static const struct of_device_id mvebu_pcie_of_match_table[] = {
1730 { .compatible = "marvell,armada-xp-pcie", },
1731 { .compatible = "marvell,armada-370-pcie", },
1732 { .compatible = "marvell,dove-pcie", },
1733 { .compatible = "marvell,kirkwood-pcie", },
1734 {},
1735};
1736
/*
 * Suspend/resume run in the "noirq" phase, so port state is saved after,
 * and restored before, regular device interrupts are active.
 */
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};
1740
/* Platform driver glue; binding is driven by the OF match table. */
static struct platform_driver mvebu_pcie_driver = {
	.driver = {
		.name = "mvebu-pcie",
		.of_match_table = mvebu_pcie_of_match_table,
		.pm = &mvebu_pcie_pm_ops,
	},
	.probe = mvebu_pcie_probe,
	.remove = mvebu_pcie_remove,
};
module_platform_driver(mvebu_pcie_driver);
1751
1752MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@bootlin.com>");
1753MODULE_AUTHOR("Pali Roh�r <pali@kernel.org>");
1754MODULE_DESCRIPTION("Marvell EBU PCIe controller");
1755MODULE_LICENSE("GPL v2");
1756