linux/drivers/iommu/fsl_pamu.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 *
 */

#define pr_fmt(fmt)    "fsl-pamu: %s: " fmt, __func__

#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/of_platform.h>
#include <linux/bootmem.h>
#include <linux/genalloc.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/fsl_guts.h>

#include "fsl_pamu.h"

/* define indexes for each operation mapping scenario */
#define OMI_QMAN        0x00
#define OMI_FMAN        0x01
#define OMI_QMAN_PRIV   0x02
#define OMI_CAAM        0x03

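/*
 * Combine the high and low 32-bit halves of a register pair (e.g. the
 * PAMU AVAH/AVAL or POEAH/POEAL registers) into a single 64-bit value.
 */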
#define make64(high, low) (((u64)(high) << 32) | (low))

struct pamu_isr_data {
        void __iomem *pamu_reg_base;    /* Base address of PAMU regs */
        unsigned int count;             /* The number of PAMUs */
};

static struct paace *ppaact;
static struct paace *spaact;
static struct ome *omt;

/*
 * Table of compatible strings for matching the device tree
 * guts node on QorIQ SOCs.
 * "fsl,qoriq-device-config-2.0" corresponds to the T4 and B4
 * SOCs; "fsl,qoriq-device-config-1.0" is used for the older SOCs.
 */
static const struct of_device_id guts_device_ids[] = {
        { .compatible = "fsl,qoriq-device-config-1.0", },
        { .compatible = "fsl,qoriq-device-config-2.0", },
        {}
};

/*
 * Table of compatible strings for matching the device tree
 * L3 cache controller node.
 * "fsl,t4240-l3-cache-controller" corresponds to T4,
 * "fsl,b4860-l3-cache-controller" corresponds to B4 and
 * "fsl,p4080-l3-cache-controller" corresponds to the other SOCs.
 */
static const struct of_device_id l3_device_ids[] = {
        { .compatible = "fsl,t4240-l3-cache-controller", },
        { .compatible = "fsl,b4860-l3-cache-controller", },
        { .compatible = "fsl,p4080-l3-cache-controller", },
        {}
};

/* maximum subwindows permitted per liodn */
static u32 max_subwindow_count;

/* Pool for fspi allocation */
struct gen_pool *spaace_pool;

/**
 * pamu_get_max_subwin_cnt() - Return the maximum supported
 * subwindow count per liodn.
 */
u32 pamu_get_max_subwin_cnt(void)
{
        return max_subwindow_count;
}

/**
 * pamu_get_ppaace() - Return the primary PAACE
 * @liodn: liodn PAACT index for desired PAACE
 *
 * Returns the ppaace pointer upon success else return
 * NULL.
 */
static struct paace *pamu_get_ppaace(int liodn)
{
        if (!ppaact || liodn >= PAACE_NUMBER_ENTRIES) {
                pr_debug("PPAACT doesn't exist\n");
                return NULL;
        }

        return &ppaact[liodn];
}

/**
 * pamu_enable_liodn() - Set valid bit of PAACE
 * @liodn: liodn PAACT index for desired PAACE
 *
 * Returns 0 upon success else error code < 0 returned
 */
int pamu_enable_liodn(int liodn)
{
        struct paace *ppaace;

        ppaace = pamu_get_ppaace(liodn);
        if (!ppaace) {
                pr_debug("Invalid primary paace entry\n");
                return -ENOENT;
        }

        if (!get_bf(ppaace->addr_bitfields, PPAACE_AF_WSE)) {
                pr_debug("liodn %d not configured\n", liodn);
                return -EINVAL;
        }

        /* Ensure that all other stores to the ppaace complete first */
        mb();

        set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
        mb();

        return 0;
}

/**
 * pamu_disable_liodn() - Clears valid bit of PAACE
 * @liodn: liodn PAACT index for desired PAACE
 *
 * Returns 0 upon success else error code < 0 returned
 */
int pamu_disable_liodn(int liodn)
{
        struct paace *ppaace;

        ppaace = pamu_get_ppaace(liodn);
        if (!ppaace) {
                pr_debug("Invalid primary paace entry\n");
                return -ENOENT;
        }

        set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
        mb();

        return 0;
}

/* Derive the window size encoding for a particular PAACE entry */
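/* e.g. a 4 KiB window encodes as WSE = 11, since 2^(11 + 1) = 4096 */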
static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
{
        /* Bug if not a power of 2 */
        BUG_ON(!is_power_of_2(addrspace_size));

        /* window size is 2^(WSE+1) bytes */
        return __ffs(addrspace_size) - 1;
}

/* Derive the PAACE window count encoding for the subwindow count */
static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt)
{
        /* window count is 2^(WCE+1) */
        return __ffs(subwindow_cnt) - 1;
}

/*
 * Set the PAACE type as primary and set the coherency required domain
 * attribute
 */
static void pamu_init_ppaace(struct paace *ppaace)
{
        set_bf(ppaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_PRIMARY);

        set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
               PAACE_M_COHERENCE_REQ);
}

/*
 * Set the PAACE type as secondary and set the coherency required domain
 * attribute.
 */
static void pamu_init_spaace(struct paace *spaace)
{
        set_bf(spaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_SECONDARY);
        set_bf(spaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
               PAACE_M_COHERENCE_REQ);
}

/*
 * Return the spaace (corresponding to the secondary window index)
 * for a particular ppaace.
 */
static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
{
        u32 subwin_cnt;
        struct paace *spaace = NULL;

        subwin_cnt = 1UL << (get_bf(paace->impl_attr, PAACE_IA_WCE) + 1);

        if (wnum < subwin_cnt)
                spaace = &spaact[paace->fspi + wnum];
        else
                pr_debug("secondary paace out of bounds\n");

        return spaace;
}

/**
 * pamu_get_fspi_and_allocate() - Allocates fspi index and reserves subwindows
 *                                required for primary PAACE in the secondary
 *                                PAACE table.
 * @subwin_cnt: Number of subwindows to be reserved.
 *
 * A PPAACE entry may have a number of associated subwindows. A subwindow
 * corresponds to a SPAACE entry in the SPAACT table. Each PAACE entry stores
 * the index (fspi) of the first SPAACE entry in the SPAACT table. This
 * function returns the index of the first SPAACE entry. The remaining
 * SPAACE entries are reserved contiguously from that index.
 *
 * Returns a valid fspi index in the range 0 - SPAACE_NUMBER_ENTRIES on success.
 * If no SPAACE entry is available, or the allocator cannot reserve the required
 * number of contiguous entries, the function returns ULONG_MAX to indicate
 * failure.
 */
static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
{
        unsigned long spaace_addr;

        spaace_addr = gen_pool_alloc(spaace_pool, subwin_cnt * sizeof(struct paace));
        if (!spaace_addr)
                return ULONG_MAX;

        return (spaace_addr - (unsigned long)spaact) / (sizeof(struct paace));
}

/* Release the subwindows reserved for a particular LIODN */
void pamu_free_subwins(int liodn)
{
        struct paace *ppaace;
        u32 subwin_cnt, size;

        ppaace = pamu_get_ppaace(liodn);
        if (!ppaace) {
                pr_debug("Invalid liodn entry\n");
                return;
        }

        if (get_bf(ppaace->addr_bitfields, PPAACE_AF_MW)) {
                subwin_cnt = 1UL << (get_bf(ppaace->impl_attr, PAACE_IA_WCE) + 1);
                size = (subwin_cnt - 1) * sizeof(struct paace);
                gen_pool_free(spaace_pool, (unsigned long)&spaact[ppaace->fspi], size);
                set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
        }
}

/*
 * Function used for updating the stash destination for the corresponding
 * LIODN.
 */
int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
{
        struct paace *paace;

        paace = pamu_get_ppaace(liodn);
        if (!paace) {
                pr_debug("Invalid liodn entry\n");
                return -ENOENT;
        }
        if (subwin) {
                paace = pamu_get_spaace(paace, subwin - 1);
                if (!paace)
                        return -ENOENT;
        }
        set_bf(paace->impl_attr, PAACE_IA_CID, value);

        mb();

        return 0;
}

/* Disable a subwindow corresponding to the LIODN */
int pamu_disable_spaace(int liodn, u32 subwin)
{
        struct paace *paace;

        paace = pamu_get_ppaace(liodn);
        if (!paace) {
                pr_debug("Invalid liodn entry\n");
                return -ENOENT;
        }
        if (subwin) {
                paace = pamu_get_spaace(paace, subwin - 1);
                if (!paace)
                        return -ENOENT;
                set_bf(paace->addr_bitfields, PAACE_AF_V,
                         PAACE_V_INVALID);
        } else {
                set_bf(paace->addr_bitfields, PAACE_AF_AP,
                         PAACE_AP_PERMS_DENIED);
        }

        mb();

        return 0;
}

/**
 * pamu_config_ppaace() - Sets up PPAACE entry for specified liodn
 *
 * @liodn: Logical IO device number
 * @win_addr: starting address of DSA window
 * @win_size: size of DSA window
 * @omi: Operation mapping index -- if ~omi == 0 then omi not defined
 * @rpn: real (true physical) page number
 * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
 *           snoopid not defined
 * @stashid: cache stash id for associated cpu -- if ~stashid == 0 then
 *           stashid not defined
 * @subwin_cnt: number of sub-windows
 * @prot: window permissions
 *
 * Returns 0 upon success else error code < 0 returned
 */
int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
                       u32 omi, unsigned long rpn, u32 snoopid, u32 stashid,
                       u32 subwin_cnt, int prot)
{
        struct paace *ppaace;
        unsigned long fspi;

        if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) {
                pr_debug("window size too small or not a power of two %llx\n", win_size);
                return -EINVAL;
        }

        if (win_addr & (win_size - 1)) {
                pr_debug("window address is not aligned with window size\n");
                return -EINVAL;
        }

        ppaace = pamu_get_ppaace(liodn);
        if (!ppaace)
                return -ENOENT;

        /* window size is 2^(WSE+1) bytes */
        set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
                map_addrspace_size_to_wse(win_size));

        pamu_init_ppaace(ppaace);

        ppaace->wbah = win_addr >> (PAMU_PAGE_SHIFT + 20);
        set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL,
               (win_addr >> PAMU_PAGE_SHIFT));

        /* set up operation mapping if it's configured */
        if (omi < OME_NUMBER_ENTRIES) {
                set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
                ppaace->op_encode.index_ot.omi = omi;
        } else if (~omi != 0) {
                pr_debug("bad operation mapping index: %d\n", omi);
                return -EINVAL;
        }

        /* configure stash id */
        if (~stashid != 0)
                set_bf(ppaace->impl_attr, PAACE_IA_CID, stashid);

        /* configure snoop id */
        if (~snoopid != 0)
                ppaace->domain_attr.to_host.snpid = snoopid;

        if (subwin_cnt) {
                /* The first entry is in the primary PAACE instead */
                fspi = pamu_get_fspi_and_allocate(subwin_cnt - 1);
                if (fspi == ULONG_MAX) {
                        pr_debug("spaace indexes exhausted\n");
                        return -EINVAL;
                }

                /* window count is 2^(WCE+1) */
                set_bf(ppaace->impl_attr, PAACE_IA_WCE,
                       map_subwindow_cnt_to_wce(subwin_cnt));
                set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0x1);
                ppaace->fspi = fspi;
        } else {
                set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
                ppaace->twbah = rpn >> 20;
                set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, rpn);
                set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot);
                set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0);
                set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
        }
        mb();

        return 0;
}

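/*
 * Illustrative example (not part of the original source; the LIODN and
 * addresses are made up): map a 1 MiB DMA window at physical address
 * 0x20000000 for LIODN 42, with no subwindows, no operation mapping, no
 * stashing or snooping, and full access permissions:
 *
 *      pamu_config_ppaace(42, 0x20000000, 0x100000, ~(u32)0,
 *                         0x20000000 >> PAMU_PAGE_SHIFT,
 *                         ~(u32)0, ~(u32)0, 0, PAACE_AP_PERMS_ALL);
 *      pamu_enable_liodn(42);
 */
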
/**
 * pamu_config_spaace() - Sets up SPAACE entry for specified subwindow
 *
 * @liodn:  Logical IO device number
 * @subwin_cnt:  number of sub-windows associated with dma-window
 * @subwin: subwindow index
 * @subwin_size: size of subwindow
 * @omi: Operation mapping index
 * @rpn: real (true physical) page number
 * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
 *                        snoopid not defined
 * @stashid: cache stash id for associated cpu
 * @enable: enable/disable subwindow after reconfiguration
 * @prot: sub window permissions
 *
 * Returns 0 upon success else error code < 0 returned
 */
int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
                       phys_addr_t subwin_size, u32 omi, unsigned long rpn,
                       u32 snoopid, u32 stashid, int enable, int prot)
{
        struct paace *paace;

        /* setup sub-windows */
        if (!subwin_cnt) {
                pr_debug("Invalid subwindow count\n");
                return -EINVAL;
        }

        paace = pamu_get_ppaace(liodn);
        if (subwin > 0 && subwin < subwin_cnt && paace) {
                paace = pamu_get_spaace(paace, subwin - 1);

                if (paace && !(paace->addr_bitfields & PAACE_V_VALID)) {
                        pamu_init_spaace(paace);
                        set_bf(paace->addr_bitfields, SPAACE_AF_LIODN, liodn);
                }
        }

        if (!paace) {
                pr_debug("Invalid liodn entry\n");
                return -ENOENT;
        }

        if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) {
                pr_debug("subwindow size out of range, or not a power of 2\n");
                return -EINVAL;
        }

        if (rpn == ULONG_MAX) {
                pr_debug("real page number out of range\n");
                return -EINVAL;
        }

        /* window size is 2^(WSE+1) bytes */
        set_bf(paace->win_bitfields, PAACE_WIN_SWSE,
               map_addrspace_size_to_wse(subwin_size));

        set_bf(paace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
        paace->twbah = rpn >> 20;
        set_bf(paace->win_bitfields, PAACE_WIN_TWBAL, rpn);
        set_bf(paace->addr_bitfields, PAACE_AF_AP, prot);

        /* configure snoop id */
        if (~snoopid != 0)
                paace->domain_attr.to_host.snpid = snoopid;

        /* set up operation mapping if it's configured */
        if (omi < OME_NUMBER_ENTRIES) {
                set_bf(paace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
                paace->op_encode.index_ot.omi = omi;
        } else if (~omi != 0) {
                pr_debug("bad operation mapping index: %d\n", omi);
                return -EINVAL;
        }

        if (~stashid != 0)
                set_bf(paace->impl_attr, PAACE_IA_CID, stashid);

        smp_wmb();

        if (enable)
                set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);

        mb();

        return 0;
}

/**
 * get_ome_index() - Returns the index in the operation mapping table
 *                   for a device.
 * @omi_index: pointer for storing the index value
 * @dev: target device
 *
 * The index is only written for QMan and QMan portal devices; for any
 * other device *omi_index is left unchanged.
 */
void get_ome_index(u32 *omi_index, struct device *dev)
{
        if (of_device_is_compatible(dev->of_node, "fsl,qman-portal"))
                *omi_index = OMI_QMAN;
        if (of_device_is_compatible(dev->of_node, "fsl,qman"))
                *omi_index = OMI_QMAN_PRIV;
}

/**
 * get_stash_id - Returns stash destination id corresponding to a
 *                cache type and vcpu.
 * @stash_dest_hint: L1, L2 or L3
 * @vcpu: vcpu target for a particular cache type.
 *
 * Returns the stash id on success or ~(u32)0 on failure.
 */
u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
{
        const u32 *prop;
        struct device_node *node;
        u32 cache_level;
        int len, found = 0;
        int i;

        /* Fastpath, exit early if L3/CPC cache is target for stashing */
        if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
                node = of_find_matching_node(NULL, l3_device_ids);
                if (node) {
                        prop = of_get_property(node, "cache-stash-id", 0);
                        if (!prop) {
                                pr_debug("missing cache-stash-id at %s\n", node->full_name);
                                of_node_put(node);
                                return ~(u32)0;
                        }
                        of_node_put(node);
                        return be32_to_cpup(prop);
                }
                return ~(u32)0;
        }

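        /* find the cpu node whose "reg" property contains the requested vcpu number */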
        for_each_node_by_type(node, "cpu") {
                prop = of_get_property(node, "reg", &len);
                for (i = 0; i < len / sizeof(u32); i++) {
                        if (be32_to_cpup(&prop[i]) == vcpu) {
                                found = 1;
                                goto found_cpu_node;
                        }
                }
        }
found_cpu_node:

        /* find the hwnode that represents the cache */
        for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) {
                if (stash_dest_hint == cache_level) {
                        prop = of_get_property(node, "cache-stash-id", 0);
                        if (!prop) {
                                pr_debug("missing cache-stash-id at %s\n", node->full_name);
                                of_node_put(node);
                                return ~(u32)0;
                        }
                        of_node_put(node);
                        return be32_to_cpup(prop);
                }

                prop = of_get_property(node, "next-level-cache", 0);
                if (!prop) {
                        pr_debug("can't find next-level-cache at %s\n",
                                node->full_name);
                        of_node_put(node);
                        return ~(u32)0;  /* can't traverse any further */
                }
                of_node_put(node);

                /* advance to next node in cache hierarchy */
                node = of_find_node_by_phandle(*prop);
                if (!node) {
                        pr_debug("Invalid node in cache hierarchy\n");
                        return ~(u32)0;
                }
        }

        pr_debug("stash dest not found for %d on vcpu %d\n",
                  stash_dest_hint, vcpu);
        return ~(u32)0;
}

/* Identify if the PAACT table entry belongs to QMAN, BMAN or QMAN Portal */
#define QMAN_PAACE 1
#define QMAN_PORTAL_PAACE 2
#define BMAN_PAACE 3

/**
 * Setup operation mapping and stash destinations for QMAN and QMAN portal.
 * Memory accesses to QMAN and BMAN private memory need not be coherent, so
 * clear the PAACE entry coherency attribute for them.
 */
static void setup_qbman_paace(struct paace *ppaace, int paace_type)
{
        switch (paace_type) {
        case QMAN_PAACE:
                set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
                ppaace->op_encode.index_ot.omi = OMI_QMAN_PRIV;
                /* setup QMAN Private data stashing for the L3 cache */
                set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
                set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
                       0);
                break;
        case QMAN_PORTAL_PAACE:
                set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
                ppaace->op_encode.index_ot.omi = OMI_QMAN;
                /* Set DQRR and Frame stashing for the L3 cache */
                set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
                break;
        case BMAN_PAACE:
                set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
                       0);
                break;
        }
}

/**
 * Setup the operation mapping table for various devices. This is a static
 * table where each table index corresponds to a particular device. PAMU uses
 * this table to translate device transactions to the appropriate CoreNet
 * transactions.
 */
static void __init setup_omt(struct ome *omt)
{
        struct ome *ome;

        /* Configure OMI_QMAN */
        ome = &omt[OMI_QMAN];

        ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ;
        ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
        ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
        ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSAO;

        ome->moe[IOE_DIRECT0_IDX] = EOE_VALID | EOE_LDEC;
        ome->moe[IOE_DIRECT1_IDX] = EOE_VALID | EOE_LDECPE;

        /* Configure OMI_FMAN */
        ome = &omt[OMI_FMAN];
        ome->moe[IOE_READ_IDX]  = EOE_VALID | EOE_READI;
        ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;

        /* Configure OMI_QMAN private */
        ome = &omt[OMI_QMAN_PRIV];
        ome->moe[IOE_READ_IDX]  = EOE_VALID | EOE_READ;
        ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
        ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
        ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSA;

        /* Configure OMI_CAAM */
        ome = &omt[OMI_CAAM];
        ome->moe[IOE_READ_IDX]  = EOE_VALID | EOE_READI;
        ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
}

/*
 * Get the maximum number of PAACT table entries
 * and subwindows supported by PAMU
 */
static void get_pamu_cap_values(unsigned long pamu_reg_base)
{
        u32 pc_val;

        pc_val = in_be32((u32 *)(pamu_reg_base + PAMU_PC3));
        /* Maximum number of subwindows per liodn */
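        /* e.g. an MWCE field of 4 yields 1 << (1 + 4) = 32 subwindows */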
        max_subwindow_count = 1 << (1 + PAMU_PC3_MWCE(pc_val));
}

/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
                   phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
                   phys_addr_t omt_phys)
{
        u32 *pc;
        struct pamu_mmap_regs *pamu_regs;

        pc = (u32 *) (pamu_reg_base + PAMU_PC);
        pamu_regs = (struct pamu_mmap_regs *)
                (pamu_reg_base + PAMU_MMAP_REGS_BASE);

        /* set up pointers to corenet control blocks */

        out_be32(&pamu_regs->ppbah, upper_32_bits(ppaact_phys));
        out_be32(&pamu_regs->ppbal, lower_32_bits(ppaact_phys));
        ppaact_phys = ppaact_phys + PAACT_SIZE;
        out_be32(&pamu_regs->pplah, upper_32_bits(ppaact_phys));
        out_be32(&pamu_regs->pplal, lower_32_bits(ppaact_phys));

        out_be32(&pamu_regs->spbah, upper_32_bits(spaact_phys));
        out_be32(&pamu_regs->spbal, lower_32_bits(spaact_phys));
        spaact_phys = spaact_phys + SPAACT_SIZE;
        out_be32(&pamu_regs->splah, upper_32_bits(spaact_phys));
        out_be32(&pamu_regs->splal, lower_32_bits(spaact_phys));

        out_be32(&pamu_regs->obah, upper_32_bits(omt_phys));
        out_be32(&pamu_regs->obal, lower_32_bits(omt_phys));
        omt_phys = omt_phys + OMT_SIZE;
        out_be32(&pamu_regs->olah, upper_32_bits(omt_phys));
        out_be32(&pamu_regs->olal, lower_32_bits(omt_phys));

        /*
         * set PAMU enable bit,
         * allow ppaact & omt to be cached
         * & enable PAMU access violation interrupts.
         */

        out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
                        PAMU_ACCESS_VIOLATION_ENABLE);
        out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
        return 0;
}

/* Enable all device LIODNS */
static void __init setup_liodns(void)
{
        int i, len;
        struct paace *ppaace;
        struct device_node *node = NULL;
        const u32 *prop;

        for_each_node_with_property(node, "fsl,liodn") {
                prop = of_get_property(node, "fsl,liodn", &len);
                for (i = 0; i < len / sizeof(u32); i++) {
                        int liodn;

                        liodn = be32_to_cpup(&prop[i]);
                        if (liodn >= PAACE_NUMBER_ENTRIES) {
                                pr_debug("Invalid LIODN value %d\n", liodn);
                                continue;
                        }
                        ppaace = pamu_get_ppaace(liodn);
                        pamu_init_ppaace(ppaace);
                        /* window size is 2^(WSE+1) bytes */
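                        /* a WSE of 35 gives the largest window: 2^(35 + 1) = 2^36 bytes */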
                        set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, 35);
                        ppaace->wbah = 0;
                        set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
                        set_bf(ppaace->impl_attr, PAACE_IA_ATM,
                                PAACE_ATM_NO_XLATE);
                        set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
                                PAACE_AP_PERMS_ALL);
                        if (of_device_is_compatible(node, "fsl,qman-portal"))
                                setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
                        if (of_device_is_compatible(node, "fsl,qman"))
                                setup_qbman_paace(ppaace, QMAN_PAACE);
                        if (of_device_is_compatible(node, "fsl,bman"))
                                setup_qbman_paace(ppaace, BMAN_PAACE);
                        mb();
                        pamu_enable_liodn(liodn);
                }
        }
}

irqreturn_t pamu_av_isr(int irq, void *arg)
{
        struct pamu_isr_data *data = arg;
        phys_addr_t phys;
        unsigned int i, j, ret;

        pr_emerg("access violation interrupt\n");

        for (i = 0; i < data->count; i++) {
                void __iomem *p = data->pamu_reg_base + i * PAMU_OFFSET;
                u32 pics = in_be32(p + PAMU_PICS);

                if (pics & PAMU_ACCESS_VIOLATION_STAT) {
                        u32 avs1 = in_be32(p + PAMU_AVS1);
                        struct paace *paace;

                        pr_emerg("POES1=%08x\n", in_be32(p + PAMU_POES1));
                        pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
                        pr_emerg("AVS1=%08x\n", avs1);
                        pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
                        pr_emerg("AVA=%016llx\n", make64(in_be32(p + PAMU_AVAH),
                                in_be32(p + PAMU_AVAL)));
                        pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
                        pr_emerg("POEA=%016llx\n", make64(in_be32(p + PAMU_POEAH),
                                in_be32(p + PAMU_POEAL)));

                        phys = make64(in_be32(p + PAMU_POEAH),
                                in_be32(p + PAMU_POEAL));

                        /* Assume that POEA points to a PAACE */
                        if (phys) {
                                u32 *paace = phys_to_virt(phys);

                                /* Only the first four words are relevant */
                                for (j = 0; j < 4; j++)
                                        pr_emerg("PAACE[%u]=%08x\n", j, in_be32(paace + j));
                        }

                        /* clear access violation condition */
                        out_be32((p + PAMU_AVS1), avs1 & PAMU_AV_MASK);
                        paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
                        BUG_ON(!paace);
                        /* check if we got a violation for a disabled LIODN */
                        if (!get_bf(paace->addr_bitfields, PAACE_AF_V)) {
                                /*
                                 * As per hardware erratum A-003638, access
                                 * violation can be reported for a disabled
                                 * LIODN. If we hit that condition, disable
                                 * access violation reporting.
                                 */
                                pics &= ~PAMU_ACCESS_VIOLATION_ENABLE;
                        } else {
                                /* Disable the LIODN */
                                ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
                                BUG_ON(ret);
                                pr_emerg("Disabling liodn %x\n", avs1 >> PAMU_AVS1_LIODN_SHIFT);
                        }
                        out_be32((p + PAMU_PICS), pics);
                }
        }

        return IRQ_HANDLED;
}

#define LAWAR_EN                0x80000000
#define LAWAR_TARGET_MASK       0x0FF00000
#define LAWAR_TARGET_SHIFT      20
#define LAWAR_SIZE_MASK         0x0000003F
#define LAWAR_CSDID_MASK        0x000FF000
#define LAWAR_CSDID_SHIFT       12

#define LAW_SIZE_4K             0xb

struct ccsr_law {
        u32     lawbarh;        /* LAWn base address high */
        u32     lawbarl;        /* LAWn base address low */
        u32     lawar;          /* LAWn attributes */
        u32     reserved;
};

/*
 * Create a coherence subdomain for a given memory block.
 */
static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
{
        struct device_node *np;
        const __be32 *iprop;
        void __iomem *lac = NULL;       /* Local Access Control registers */
        struct ccsr_law __iomem *law;
        void __iomem *ccm = NULL;
        u32 __iomem *csdids;
        unsigned int i, num_laws, num_csds;
        u32 law_target = 0;
        u32 csd_id = 0;
        int ret = 0;

        np = of_find_compatible_node(NULL, NULL, "fsl,corenet-law");
        if (!np)
                return -ENODEV;

        iprop = of_get_property(np, "fsl,num-laws", NULL);
        if (!iprop) {
                ret = -ENODEV;
                goto error;
        }

        num_laws = be32_to_cpup(iprop);
        if (!num_laws) {
                ret = -ENODEV;
                goto error;
        }

        lac = of_iomap(np, 0);
        if (!lac) {
                ret = -ENODEV;
                goto error;
        }

        /* LAW registers are at offset 0xC00 */
        law = lac + 0xC00;

        of_node_put(np);

        np = of_find_compatible_node(NULL, NULL, "fsl,corenet-cf");
        if (!np) {
                ret = -ENODEV;
                goto error;
        }

        iprop = of_get_property(np, "fsl,ccf-num-csdids", NULL);
        if (!iprop) {
                ret = -ENODEV;
                goto error;
        }

        num_csds = be32_to_cpup(iprop);
        if (!num_csds) {
                ret = -ENODEV;
                goto error;
        }

        ccm = of_iomap(np, 0);
        if (!ccm) {
                ret = -ENOMEM;
                goto error;
        }

        /* The undocumented CSDID registers are at offset 0x600 */
        csdids = ccm + 0x600;

        of_node_put(np);
        np = NULL;

        /* Find an unused coherence subdomain ID */
        for (csd_id = 0; csd_id < num_csds; csd_id++) {
                if (!csdids[csd_id])
                        break;
        }

        if (csd_id == num_csds) {
                /* No free coherence subdomain ID was found */
                ret = -ENOENT;
                goto error;
        }

        /* Store the Port ID in the (undocumented) proper CIDMRxx register */
        csdids[csd_id] = csd_port_id;

        /* Find the DDR LAW that maps to our buffer. */
        for (i = 0; i < num_laws; i++) {
                if (law[i].lawar & LAWAR_EN) {
                        phys_addr_t law_start, law_end;

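                        /* a LAW covers 2^(LAWAR_SIZE + 1) bytes starting at its base address */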
                        law_start = make64(law[i].lawbarh, law[i].lawbarl);
                        law_end = law_start +
                                (2ULL << (law[i].lawar & LAWAR_SIZE_MASK));

                        if (law_start <= phys && phys < law_end) {
                                law_target = law[i].lawar & LAWAR_TARGET_MASK;
                                break;
                        }
                }
        }

        if (i == 0 || i == num_laws) {
                /* This should never happen */
                ret = -ENOENT;
                goto error;
        }

        /* Find a free LAW entry */
        while (law[--i].lawar & LAWAR_EN) {
                if (i == 0) {
                        /* No higher priority LAW slots available */
                        ret = -ENOENT;
                        goto error;
                }
        }

        law[i].lawbarh = upper_32_bits(phys);
        law[i].lawbarl = lower_32_bits(phys);
        wmb();
        law[i].lawar = LAWAR_EN | law_target | (csd_id << LAWAR_CSDID_SHIFT) |
                (LAW_SIZE_4K + get_order(size));
        wmb();

error:
        if (ccm)
                iounmap(ccm);

        if (lac)
                iounmap(lac);

        if (np)
                of_node_put(np);

        return ret;
}

/*
 * Table of SVRs and the corresponding PORT_ID values. Port ID corresponds to a
 * bit map of snoopers for a given range of memory mapped by a LAW.
 *
 * All future CoreNet-enabled SOCs will have this erratum (A-004510) fixed, so this
 * table should never need to be updated.  SVRs are guaranteed to be unique, so
 * there is no worry that a future SOC will inadvertently have one of these
 * values.
 */
static const struct {
        u32 svr;
        u32 port_id;
} port_id_map[] = {
        {0x82100010, 0xFF000000},       /* P2040 1.0 */
        {0x82100011, 0xFF000000},       /* P2040 1.1 */
        {0x82100110, 0xFF000000},       /* P2041 1.0 */
        {0x82100111, 0xFF000000},       /* P2041 1.1 */
        {0x82110310, 0xFF000000},       /* P3041 1.0 */
        {0x82110311, 0xFF000000},       /* P3041 1.1 */
        {0x82010020, 0xFFF80000},       /* P4040 2.0 */
        {0x82000020, 0xFFF80000},       /* P4080 2.0 */
        {0x82210010, 0xFC000000},       /* P5010 1.0 */
        {0x82210020, 0xFC000000},       /* P5010 2.0 */
        {0x82200010, 0xFC000000},       /* P5020 1.0 */
        {0x82050010, 0xFF800000},       /* P5021 1.0 */
        {0x82040010, 0xFF800000},       /* P5040 1.0 */
};

#define SVR_SECURITY    0x80000 /* The Security (E) bit */

static int __init fsl_pamu_probe(struct platform_device *pdev)
{
        void __iomem *pamu_regs = NULL;
        struct ccsr_guts __iomem *guts_regs = NULL;
        u32 pamubypenr, pamu_counter;
        unsigned long pamu_reg_off;
        unsigned long pamu_reg_base;
        struct pamu_isr_data *data = NULL;
        struct device_node *guts_node;
        u64 size;
        struct page *p;
        int ret = 0;
        int irq;
        phys_addr_t ppaact_phys;
        phys_addr_t spaact_phys;
        phys_addr_t omt_phys;
        size_t mem_size = 0;
        unsigned int order = 0;
        u32 csd_port_id = 0;
        unsigned int i;

        /*
         * Enumerate all PAMUs, and allocate and set up the PAMU tables
         * for each of them.
         * NOTE: all PAMUs share the same LIODN tables.
         */

        pamu_regs = of_iomap(pdev->dev.of_node, 0);
        if (!pamu_regs) {
                dev_err(&pdev->dev, "ioremap of PAMU node failed\n");
                return -ENOMEM;
        }
        of_get_address(pdev->dev.of_node, 0, &size, NULL);

        irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
        if (irq == NO_IRQ) {
                dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n");
                goto error;
        }

        data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL);
        if (!data) {
                dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n");
                ret = -ENOMEM;
                goto error;
        }
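        /* each PAMU instance owns PAMU_OFFSET bytes of the register region */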
        data->pamu_reg_base = pamu_regs;
        data->count = size / PAMU_OFFSET;

        /* The ISR needs access to the regs, so we won't iounmap them */
        ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
        if (ret < 0) {
                dev_err(&pdev->dev, "error %i installing ISR for irq %i\n",
                        ret, irq);
                goto error;
        }

        guts_node = of_find_matching_node(NULL, guts_device_ids);
        if (!guts_node) {
                dev_err(&pdev->dev, "could not find GUTS node %s\n",
                        pdev->dev.of_node->full_name);
                ret = -ENODEV;
                goto error;
        }

        guts_regs = of_iomap(guts_node, 0);
        of_node_put(guts_node);
        if (!guts_regs) {
                dev_err(&pdev->dev, "ioremap of GUTS node failed\n");
                ret = -ENODEV;
                goto error;
        }

        /* read in the PAMU capability registers */
        get_pamu_cap_values((unsigned long)pamu_regs);
        /*
         * To simplify the allocation of a coherency domain, we allocate the
         * PAACT and the OMT in the same memory buffer.  Unfortunately, this
         * wastes more memory compared to allocating the buffers separately.
         */
        /* Determine how much memory we need */
        mem_size = (PAGE_SIZE << get_order(PAACT_SIZE)) +
                (PAGE_SIZE << get_order(SPAACT_SIZE)) +
                (PAGE_SIZE << get_order(OMT_SIZE));
        order = get_order(mem_size);

        p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!p) {
                dev_err(&pdev->dev, "unable to allocate PAACT/SPAACT/OMT block\n");
                ret = -ENOMEM;
                goto error;
        }

        ppaact = page_address(p);
        ppaact_phys = page_to_phys(p);

        /* Make sure the memory is naturally aligned */
        if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
                dev_err(&pdev->dev, "PAACT/OMT block is unaligned\n");
                ret = -ENOMEM;
                goto error;
        }

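        /*
         * The single block holds the PPAACT, then the SPAACT, then the OMT,
         * with each table rounded up to a whole number of pages.
         */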
        spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
        omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));

        dev_dbg(&pdev->dev, "ppaact virt=%p phys=0x%llx\n", ppaact,
                (unsigned long long) ppaact_phys);

        /* Check to see if we need to implement the work-around on this SOC */

        /* Determine the Port ID for our coherence subdomain */
        for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
                if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
                        csd_port_id = port_id_map[i].port_id;
                        dev_dbg(&pdev->dev, "found matching SVR %08x\n",
                                port_id_map[i].svr);
                        break;
                }
        }

        if (csd_port_id) {
                dev_dbg(&pdev->dev, "creating coherency subdomain at address "
                        "0x%llx, size %zu, port id 0x%08x\n", ppaact_phys,
                        mem_size, csd_port_id);

                ret = create_csd(ppaact_phys, mem_size, csd_port_id);
                if (ret) {
                        dev_err(&pdev->dev, "could not create coherence "
                                "subdomain\n");
                        goto error;
                }
        }

        spaact_phys = virt_to_phys(spaact);
        omt_phys = virt_to_phys(omt);

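        /* minimum allocation unit of the pool is one SPAACE entry (struct paace) */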
        spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
        if (!spaace_pool) {
                ret = -ENOMEM;
                dev_err(&pdev->dev, "PAMU: failed to allocate spaace gen pool\n");
                goto error;
        }

        ret = gen_pool_add(spaace_pool, (unsigned long)spaact, SPAACT_SIZE, -1);
        if (ret)
                goto error_genpool;

        pamubypenr = in_be32(&guts_regs->pamubypenr);

        for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
             pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {

                pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off;
                setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
                                 spaact_phys, omt_phys);
                /* Disable PAMU bypass for this PAMU */
                pamubypenr &= ~pamu_counter;
        }

        setup_omt(omt);

        /* Enable all relevant PAMU(s) */
        out_be32(&guts_regs->pamubypenr, pamubypenr);

        iounmap(guts_regs);

        /* Enable DMA for the LIODNs in the device tree */

        setup_liodns();

        return 0;

error_genpool:
        gen_pool_destroy(spaace_pool);

error:
        if (irq != NO_IRQ)
                free_irq(irq, data);

        if (data) {
                memset(data, 0, sizeof(struct pamu_isr_data));
                kfree(data);
        }

        if (pamu_regs)
                iounmap(pamu_regs);

        if (guts_regs)
                iounmap(guts_regs);

        if (ppaact)
                free_pages((unsigned long)ppaact, order);

        ppaact = NULL;

        return ret;
}

static const struct of_device_id fsl_of_pamu_ids[] = {
        {
                .compatible = "fsl,p4080-pamu",
        },
        {
                .compatible = "fsl,pamu",
        },
        {},
};

static struct platform_driver fsl_of_pamu_driver = {
        .driver = {
                .name = "fsl-of-pamu",
                .owner = THIS_MODULE,
        },
        .probe = fsl_pamu_probe,
};

static __init int fsl_pamu_init(void)
{
        struct platform_device *pdev = NULL;
        struct device_node *np;
        int ret;

        /*
         * The normal OF process calls the probe function at some
         * indeterminate later time, after most drivers have loaded.  This is
         * too late for us, because PAMU clients (like the Qman driver)
         * depend on PAMU being initialized early.
         *
         * So instead, we "manually" call our probe function by creating the
         * platform devices ourselves.
         */

        /*
         * We assume that there is only one PAMU node in the device tree.  A
         * single PAMU node represents all of the PAMU devices in the SOC
         * already.   Everything else already makes that assumption, and the
         * binding for the PAMU nodes doesn't allow for any parent-child
         * relationships anyway.  In other words, support for more than one
         * PAMU node would require significant changes to a lot of code.
         */

        np = of_find_compatible_node(NULL, NULL, "fsl,pamu");
        if (!np) {
                pr_err("could not find a PAMU node\n");
                return -ENODEV;
        }

        ret = platform_driver_register(&fsl_of_pamu_driver);
        if (ret) {
                pr_err("could not register driver (err=%i)\n", ret);
                goto error_driver_register;
        }

        pdev = platform_device_alloc("fsl-of-pamu", 0);
        if (!pdev) {
                pr_err("could not allocate device %s\n",
                       np->full_name);
                ret = -ENOMEM;
                goto error_device_alloc;
        }
        pdev->dev.of_node = of_node_get(np);

        ret = pamu_domain_init();
        if (ret)
                goto error_device_add;

        ret = platform_device_add(pdev);
        if (ret) {
                pr_err("could not add device %s (err=%i)\n",
                       np->full_name, ret);
                goto error_device_add;
        }

        return 0;

error_device_add:
        of_node_put(pdev->dev.of_node);
        pdev->dev.of_node = NULL;

        platform_device_put(pdev);

error_device_alloc:
        platform_driver_unregister(&fsl_of_pamu_driver);

error_driver_register:
        of_node_put(np);

        return ret;
}
arch_initcall(fsl_pamu_init);