linux/arch/powerpc/sysdev/xive/native.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/kmemleak.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"


static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;

int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
        __be64 flags, eoi_page, trig_page;
        __be32 esb_shift, src_chip;
        u64 opal_flags;
        s64 rc;

        memset(data, 0, sizeof(*data));

        rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
                                    &esb_shift, &src_chip);
        if (rc) {
                pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
                       hw_irq, rc);
                return -EINVAL;
        }

        opal_flags = be64_to_cpu(flags);
        if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
                data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
        if (opal_flags & OPAL_XIVE_IRQ_LSI)
                data->flags |= XIVE_IRQ_FLAG_LSI;
        data->eoi_page = be64_to_cpu(eoi_page);
        data->trig_page = be64_to_cpu(trig_page);
        data->esb_shift = be32_to_cpu(esb_shift);
        data->src_chip = be32_to_cpu(src_chip);

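        /* Map the interrupt's ESB management (EOI) page, 2^esb_shift bytes in size */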
        data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
        if (!data->eoi_mmio) {
                pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
                return -ENOMEM;
        }

        data->hw_irq = hw_irq;

        if (!data->trig_page)
                return 0;
        if (data->trig_page == data->eoi_page) {
                data->trig_mmio = data->eoi_mmio;
                return 0;
        }

        data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
        if (!data->trig_mmio) {
                pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);

int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
        s64 rc;

        for (;;) {
                rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);

static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
                                      u32 *sw_irq)
{
        s64 rc;
        __be64 vp;
        __be32 lirq;

        rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq);

        *target = be64_to_cpu(vp);
        *sw_irq = be32_to_cpu(lirq);

        return rc == 0 ? 0 : -ENXIO;
}

#define vp_err(vp, fmt, ...) pr_err("VP[0x%x]: " fmt, vp, ##__VA_ARGS__)

/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
                                __be32 *qpage, u32 order, bool can_escalate)
{
        s64 rc = 0;
        __be64 qeoi_page_be;
        __be32 esc_irq_be;
        u64 flags, qpage_phys;

        /* If there's an actual queue page, clean it */
        if (order) {
                if (WARN_ON(!qpage))
                        return -EINVAL;
                qpage_phys = __pa(qpage);
        } else
                qpage_phys = 0;

        /* Initialize the rest of the fields */
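        /* Queue entries are 4 bytes (__be32), so a 2^order byte queue holds 2^(order - 2) of them */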
        q->msk = order ? ((1u << (order - 2)) - 1) : 0;
        q->idx = 0;
        q->toggle = 0;

        rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
                                      &qeoi_page_be,
                                      &esc_irq_be,
                                      NULL);
        if (rc) {
                vp_err(vp_id, "Failed to get queue %d info : %lld\n", prio, rc);
                rc = -EIO;
                goto fail;
        }
        q->eoi_phys = be64_to_cpu(qeoi_page_be);

        /* Default flags */
        flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

        /* Escalation needed? */
        if (can_escalate) {
                q->esc_irq = be32_to_cpu(esc_irq_be);
                flags |= OPAL_XIVE_EQ_ESCALATE;
        }

        /* Configure and enable the queue in HW */
        for (;;) {
                rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc) {
                vp_err(vp_id, "Failed to set queue %d info: %lld\n", prio, rc);
                rc = -EIO;
        } else {
                /*
                 * KVM code requires all of the above to be visible before
                 * q->qpage is set due to how it manages IPI EOIs
                 */
                wmb();
                q->qpage = qpage;
        }
fail:
        return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);

static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
        s64 rc;

        /* Disable the queue in HW */
        for (;;) {
                rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc)
                vp_err(vp_id, "Failed to disable queue %d : %lld\n", prio, rc);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
        __xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);

static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
        struct xive_q *q = &xc->queue[prio];
        __be32 *qpage;

        qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
        if (IS_ERR(qpage))
                return PTR_ERR(qpage);

        return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
                                           q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
        struct xive_q *q = &xc->queue[prio];
        unsigned int alloc_order;

        /*
         * We use the variant with no iounmap as this is called on exec
         * from an IPI and iounmap isn't safe
         */
        __xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
        alloc_order = xive_alloc_order(xive_queue_shift);
        free_pages((unsigned long)q->qpage, alloc_order);
        q->qpage = NULL;
}

static bool xive_native_match(struct device_node *node)
{
        return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

static s64 opal_xive_allocate_irq(u32 chip_id)
{
        s64 irq = opal_xive_allocate_irq_raw(chip_id);

        /*
         * Old versions of skiboot can incorrectly return 0xffffffff to
         * indicate no space, fix it up here.
         */
        return irq == 0xffffffff ? OPAL_RESOURCE : irq;
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
        s64 irq;

        /* Allocate an IPI and populate info about it */
        for (;;) {
                irq = opal_xive_allocate_irq(xc->chip_id);
                if (irq == OPAL_BUSY) {
                        msleep(OPAL_BUSY_DELAY_MS);
                        continue;
                }
                if (irq < 0) {
                        pr_err("Failed to allocate IPI on CPU %d\n", cpu);
                        return -ENXIO;
                }
                xc->hw_ipi = irq;
                break;
        }
        return 0;
}
#endif /* CONFIG_SMP */

u32 xive_native_alloc_irq_on_chip(u32 chip_id)
{
        s64 rc;

        for (;;) {
                rc = opal_xive_allocate_irq(chip_id);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc < 0)
                return 0;
        return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq_on_chip);

void xive_native_free_irq(u32 irq)
{
        for (;;) {
                s64 rc = opal_xive_free_irq(irq);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);

#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
        s64 rc;

        /* Free the IPI */
        if (xc->hw_ipi == XIVE_BAD_IRQ)
                return;
        for (;;) {
                rc = opal_xive_free_irq(xc->hw_ipi);
                if (rc == OPAL_BUSY) {
                        msleep(OPAL_BUSY_DELAY_MS);
                        continue;
                }
                xc->hw_ipi = XIVE_BAD_IRQ;
                break;
        }
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
        /* Switch the XIVE to emulation mode */
        opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
static void xive_native_update_pending(struct xive_cpu *xc)
{
        u8 he, cppr;
        u16 ack;

        /* Perform the "acknowledge hypervisor interrupt to register" cycle */
        ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

        /* Synchronize subsequent queue accesses */
        mb();

        /*
         * Grab the CPPR and the "HE" field which indicates the source
         * of the hypervisor interrupt (if any)
         */
        cppr = ack & 0xff;
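        /* The NSR is in the top byte of the ack; HE sits in its two most-significant bits */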
        he = (ack >> 8) >> 6;
        switch (he) {
        case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
                break;
        case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
                if (cppr == 0xff)
                        return;
                /* Mark the priority pending */
                xc->pending_prio |= 1 << cppr;

                /*
                 * A new interrupt should never have a CPPR less favored
                 * than our current one.
                 */
                if (cppr >= xc->cppr)
                        pr_err("CPU %d odd ack CPPR, got %d at %d\n",
                               smp_processor_id(), cppr, xc->cppr);

                /* Update our idea of what the CPPR is */
                xc->cppr = cppr;
                break;
        case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
        case TM_QW3_NSR_HE_LSI:  /* Legacy FW LSI (unused) */
                pr_err("CPU %d got unexpected interrupt type HE=%d\n",
                       smp_processor_id(), he);
                return;
        }
}

static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
{
        xc->chip_id = cpu_to_chip_id(cpu);
}

static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
        s64 rc;
        u32 vp;
        __be64 vp_cam_be;
        u64 vp_cam;

        if (xive_pool_vps == XIVE_INVALID_VP)
                return;

        /* Check if pool VP already active, if it is, pull it */
        if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
                in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

        /* Enable the pool VP */
        vp = xive_pool_vps + cpu;
        for (;;) {
                rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc) {
                pr_err("Failed to enable pool VP on CPU %d\n", cpu);
                return;
        }

        /* Grab its CAM value */
        rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
        if (rc) {
                pr_err("Failed to get pool VP info CPU %d\n", cpu);
                return;
        }
        vp_cam = be64_to_cpu(vp_cam_be);

        /* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
        out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
        out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
        s64 rc;
        u32 vp;

        if (xive_pool_vps == XIVE_INVALID_VP)
                return;

        /* Pull the pool VP from the CPU */
        in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

        /* Disable it */
        vp = xive_pool_vps + cpu;
        for (;;) {
                rc = opal_xive_set_vp_info(vp, 0, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
}

void xive_native_sync_source(u32 hw_irq)
{
        opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

void xive_native_sync_queue(u32 hw_irq)
{
        opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);

static const struct xive_ops xive_native_ops = {
        .populate_irq_data      = xive_native_populate_irq_data,
        .configure_irq          = xive_native_configure_irq,
        .get_irq_config         = xive_native_get_irq_config,
        .setup_queue            = xive_native_setup_queue,
        .cleanup_queue          = xive_native_cleanup_queue,
        .match                  = xive_native_match,
        .shutdown               = xive_native_shutdown,
        .update_pending         = xive_native_update_pending,
        .prepare_cpu            = xive_native_prepare_cpu,
        .setup_cpu              = xive_native_setup_cpu,
        .teardown_cpu           = xive_native_teardown_cpu,
        .sync_source            = xive_native_sync_source,
#ifdef CONFIG_SMP
        .get_ipi                = xive_native_get_ipi,
        .put_ipi                = xive_native_put_ipi,
#endif /* CONFIG_SMP */
        .name                   = "native",
};

static bool xive_parse_provisioning(struct device_node *np)
{
        int rc;

        if (of_property_read_u32(np, "ibm,xive-provision-page-size",
                                 &xive_provision_size) < 0)
                return true;
        rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
        if (rc < 0) {
                pr_err("Error %d getting provision chips array\n", rc);
                return false;
        }
        xive_provision_chip_count = rc;
        if (rc == 0)
                return true;

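        /* Each array element is a 4-byte chip ID, matching the cell size counted above */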
        xive_provision_chips = kcalloc(4, xive_provision_chip_count,
                                       GFP_KERNEL);
        if (WARN_ON(!xive_provision_chips))
                return false;

        rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
                                        xive_provision_chips,
                                        xive_provision_chip_count);
        if (rc < 0) {
                pr_err("Error %d reading provision chips array\n", rc);
                return false;
        }

        xive_provision_cache = kmem_cache_create("xive-provision",
                                                 xive_provision_size,
                                                 xive_provision_size,
                                                 0, NULL);
        if (!xive_provision_cache) {
                pr_err("Failed to allocate provision cache\n");
                return false;
        }
        return true;
}

static void xive_native_setup_pools(void)
{
        /* Allocate a pool big enough */
        pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);

        xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
        if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
                pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

        pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
                 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
        return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);

unsigned long xive_tima_os;
EXPORT_SYMBOL_GPL(xive_tima_os);

bool __init xive_native_init(void)
{
        struct device_node *np;
        struct resource r;
        void __iomem *tima;
        struct property *prop;
        u8 max_prio = 7;
        const __be32 *p;
        u32 val, cpu;
        s64 rc;

        if (xive_cmdline_disabled)
                return false;

        pr_devel("xive_native_init()\n");
        np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
        if (!np) {
                pr_devel("not found !\n");
                return false;
        }
        pr_devel("Found %pOF\n", np);

        /* Resource 1 is HV window */
        if (of_address_to_resource(np, 1, &r)) {
                pr_err("Failed to get thread mgmnt area resource\n");
                return false;
        }
        tima = ioremap(r.start, resource_size(&r));
        if (!tima) {
                pr_err("Failed to map thread mgmnt area\n");
                return false;
        }

        /* Read number of priorities */
        if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
                max_prio = val - 1;

        /* Iterate the EQ sizes and pick one */
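        /* (prefer the size matching PAGE_SHIFT; otherwise the last one listed is kept) */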
        of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
                xive_queue_shift = val;
                if (val == PAGE_SHIFT)
                        break;
        }

        /* Do we support single escalation */
        if (of_get_property(np, "single-escalation-support", NULL) != NULL)
                xive_has_single_esc = true;

        /* Configure Thread Management areas for KVM */
        for_each_possible_cpu(cpu)
                kvmppc_set_xive_tima(cpu, r.start, tima);

        /* Resource 2 is OS window */
        if (of_address_to_resource(np, 2, &r)) {
                pr_err("Failed to get thread mgmnt area resource\n");
                return false;
        }

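        /* Remember the OS TIMA base address for other users of the OS window (e.g. KVM) */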
        xive_tima_os = r.start;

        /* Grab size of provisioning pages */
        xive_parse_provisioning(np);

        /* Switch the XIVE to exploitation mode */
        rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
        if (rc) {
                pr_err("Switch to exploitation mode failed with error %lld\n", rc);
                return false;
        }

        /* Setup some dummy HV pool VPs */
        xive_native_setup_pools();

        /* Initialize XIVE core with our backend */
        if (!xive_core_init(np, &xive_native_ops, tima, TM_QW3_HV_PHYS,
                            max_prio)) {
                opal_xive_reset(OPAL_XIVE_MODE_EMU);
                return false;
        }
        pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
        return true;
}

static bool xive_native_provision_pages(void)
{
        u32 i;
        void *p;

        for (i = 0; i < xive_provision_chip_count; i++) {
                u32 chip = xive_provision_chips[i];

                /*
                 * XXX TODO: Try to make the allocation local to the node where
                 * the chip resides.
                 */
                p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
                if (!p) {
                        pr_err("Failed to allocate provisioning page\n");
                        return false;
                }
                kmemleak_ignore(p);
                opal_xive_donate_page(chip, __pa(p));
        }
        return true;
}

u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
        s64 rc;
        u32 order;

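        /* Use the smallest power-of-two block order that covers max_vcpus */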
        order = fls(max_vcpus) - 1;
        if (max_vcpus > (1 << order))
                order++;

        pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
                 max_vcpus, order);

        for (;;) {
                rc = opal_xive_alloc_vp_block(order);
                switch (rc) {
                case OPAL_BUSY:
                        msleep(OPAL_BUSY_DELAY_MS);
                        break;
                case OPAL_XIVE_PROVISIONING:
                        if (!xive_native_provision_pages())
                                return XIVE_INVALID_VP;
                        break;
                default:
                        if (rc < 0) {
                                pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
                                       order, rc);
                                return XIVE_INVALID_VP;
                        }
                        return rc;
                }
        }
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);

void xive_native_free_vp_block(u32 vp_base)
{
        s64 rc;

        if (vp_base == XIVE_INVALID_VP)
                return;

        rc = opal_xive_free_vp_block(vp_base);
        if (rc < 0)
                pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);

int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
        s64 rc;
        u64 flags = OPAL_XIVE_VP_ENABLED;

        if (single_escalation)
                flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
        for (;;) {
                rc = opal_xive_set_vp_info(vp_id, flags, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc)
                vp_err(vp_id, "Failed to enable VP : %lld\n", rc);
        return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
        s64 rc;

        for (;;) {
                rc = opal_xive_set_vp_info(vp_id, 0, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc)
                vp_err(vp_id, "Failed to disable VP : %lld\n", rc);
        return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
        __be64 vp_cam_be;
        __be32 vp_chip_id_be;
        s64 rc;

        rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
        if (rc) {
                vp_err(vp_id, "Failed to get VP info : %lld\n", rc);
                return -EIO;
        }
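        /* The CAM line is carried in the low 32 bits of the returned value */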
        *out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
        *out_chip_id = be32_to_cpu(vp_chip_id_be);

        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);

bool xive_native_has_single_escalation(void)
{
        return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);

int xive_native_get_queue_info(u32 vp_id, u32 prio,
                               u64 *out_qpage,
                               u64 *out_qsize,
                               u64 *out_qeoi_page,
                               u32 *out_escalate_irq,
                               u64 *out_qflags)
{
        __be64 qpage;
        __be64 qsize;
        __be64 qeoi_page;
        __be32 escalate_irq;
        __be64 qflags;
        s64 rc;

        rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
                                      &qeoi_page, &escalate_irq, &qflags);
        if (rc) {
                vp_err(vp_id, "failed to get queue %d info : %lld\n", prio, rc);
                return -EIO;
        }

        if (out_qpage)
                *out_qpage = be64_to_cpu(qpage);
        if (out_qsize)
                *out_qsize = be32_to_cpu(qsize);
        if (out_qeoi_page)
                *out_qeoi_page = be64_to_cpu(qeoi_page);
        if (out_escalate_irq)
                *out_escalate_irq = be32_to_cpu(escalate_irq);
        if (out_qflags)
                *out_qflags = be64_to_cpu(qflags);

        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_info);

int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
{
        __be32 opal_qtoggle;
        __be32 opal_qindex;
        s64 rc;

        rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
                                       &opal_qindex);
        if (rc) {
                vp_err(vp_id, "failed to get queue %d state : %lld\n", prio, rc);
                return -EIO;
        }

        if (qtoggle)
                *qtoggle = be32_to_cpu(opal_qtoggle);
        if (qindex)
                *qindex = be32_to_cpu(opal_qindex);

        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_state);

int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
{
        s64 rc;

        rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
        if (rc) {
                vp_err(vp_id, "failed to set queue %d state : %lld\n", prio, rc);
                return -EIO;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);

bool xive_native_has_queue_state_support(void)
{
        return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
                opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
}
EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);

int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
        __be64 state;
        s64 rc;

        rc = opal_xive_get_vp_state(vp_id, &state);
        if (rc) {
                vp_err(vp_id, "failed to get vp state : %lld\n", rc);
                return -EIO;
        }

        if (out_state)
                *out_state = be64_to_cpu(state);
        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);

machine_arch_initcall(powernv, xive_core_debug_init);