// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 */

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/moduleparam.h>

#include <asm/smp_plat.h>
#include <asm/cputype.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>

#define CREATE_TRACE_POINTS
#include <trace/events/power_cpu_migrate.h>


/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
        unsigned int id;
        asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
        return id & MPIDR_HWID_BITMASK;
}
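
/*
 * For reference, a sketch of how the MPIDR affinity fields are used
 * throughout this file (illustrative value for a hypothetical CPU whose
 * masked MPIDR is 0x101; the field layout itself is architectural, see
 * the MPIDR_AFFINITY_LEVEL() definition in asm/cputype.h):
 *
 *      mpidr = read_mpidr();                      // 0x101
 *      cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);      // Aff0: CPU in cluster = 1
 *      cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);  // Aff1: cluster = 1
 */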

/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_arg)
{
        unsigned ib_mpidr, ib_cpu, ib_cluster;
        long volatile handshake, **handshake_ptr = _arg;

        pr_debug("%s\n", __func__);

        ib_mpidr = cpu_logical_map(smp_processor_id());
        ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
        ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

        /* Advertise our handshake location */
        if (handshake_ptr) {
                handshake = 0;
                *handshake_ptr = &handshake;
        } else
                handshake = -1;

        /*
         * Our state has been saved at this point.  Let's release our
         * inbound CPU.
         */
        mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
        sev();
        /*
         * From this point on, we must assume that our counterpart CPU may
         * already have taken over in its parallel world, as if execution
         * had just returned from cpu_suspend().  It is therefore important
         * to be very careful not to make any change the other CPU is not
         * expecting.  This is why we need stack isolation.
         *
         * Fancy undercover tasks could be performed here.  For now
         * we have none.
         */

        /*
         * Let's wait until our inbound is alive.
         */
        while (!handshake) {
                wfe();
                smp_mb();
        }
        /* Let's put ourselves down. */
        mcpm_cpu_power_down();

        /* should never get here */
        BUG();
}

/*
 * Stack isolation.  To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */
#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
        unsigned int mpidr = read_mpidr();
        unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        void *stack = current_thread_info() + 1;
        stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
        stack += clusterid * STACK_SIZE + STACK_SIZE;
        call_with_stack(bL_do_switch, (void *)_arg, stack);
        BUG();
}
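
/*
 * A sketch of the resulting layout, assuming a 64-byte L1 cache line:
 * with "base" being the cache-aligned address just above thread_info,
 * cluster 0 descends from base + 512 through [base, base + 512) and
 * cluster 1 from base + 1024 through [base + 512, base + 1024), since
 * the sp handed to call_with_stack() points at the top of each slot.
 */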

/*
 * Generic switcher interface
 */

static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
static int bL_switcher_cpu_pairing[NR_CPUS];

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
        unsigned int mpidr, this_cpu, that_cpu;
        unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
        struct completion inbound_alive;
        long volatile *handshake_ptr;
        int ipi_nr, ret;

        this_cpu = smp_processor_id();
        ob_mpidr = read_mpidr();
        ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
        ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
        BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);

        if (new_cluster_id == ob_cluster)
                return 0;

        that_cpu = bL_switcher_cpu_pairing[this_cpu];
        ib_mpidr = cpu_logical_map(that_cpu);
        ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
        ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

        pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
                 this_cpu, ob_mpidr, ib_mpidr);

        this_cpu = smp_processor_id();

        /* Close the gate for our entry vectors */
        mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
        mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);

        /* Install our "inbound alive" notifier. */
        init_completion(&inbound_alive);
        ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
        ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
        mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);

        /*
         * Let's wake up the inbound CPU now in case it requires some delay
         * to come online, but leave it gated in our entry vector code.
         */
        ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
        if (ret) {
                pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
                return ret;
        }

        /*
         * Raise an SGI on the inbound CPU to make sure it doesn't stall
         * in a possible WFI, such as in bL_power_down().
         */
        gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);

        /*
         * Wait for the inbound to come up.  This allows other tasks
         * to be scheduled in the meantime.
         */
        wait_for_completion(&inbound_alive);
        mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);

        /*
         * From this point we are entering the switch critical zone
         * and can't take any interrupts anymore.
         */
        local_irq_disable();
        local_fiq_disable();
        trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr);

        /* redirect GIC's SGIs to our counterpart */
        gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);

        tick_suspend_local();

        ret = cpu_pm_enter();

        /* we cannot tolerate errors at this point */
        if (ret)
                panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

        /* Swap the physical CPUs in the logical map for this logical CPU. */
        cpu_logical_map(this_cpu) = ib_mpidr;
        cpu_logical_map(that_cpu) = ob_mpidr;

        /* Let's do the actual CPU switch. */
        ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
        if (ret > 0)
                panic("%s: cpu_suspend() returned %d\n", __func__, ret);

        /* We are executing on the inbound CPU at this point */
        mpidr = read_mpidr();
        pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
        BUG_ON(mpidr != ib_mpidr);

        mcpm_cpu_powered_up();

        ret = cpu_pm_exit();

        tick_resume_local();

        trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
        local_fiq_enable();
        local_irq_enable();

        *handshake_ptr = 1;
        dsb_sev();

        if (ret)
                pr_err("%s exiting with error %d\n", __func__, ret);
        return ret;
}
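
/*
 * To summarize the handshake above, with "outbound" being the CPU we
 * switch away from and "inbound" its paired counterpart:
 *
 *   outbound                               inbound
 *   --------                               -------
 *   power up inbound, entry vector gated   (held in entry vector)
 *   cpu_suspend() -> bL_switchpoint()
 *   publish &handshake, open gate, sev()   cpu_resume() on saved state
 *   wfe() until handshake != 0             *handshake_ptr = 1; dsb_sev()
 *   mcpm_cpu_power_down()                  returns from cpu_suspend()
 */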

struct bL_thread {
        spinlock_t lock;
        struct task_struct *task;
        wait_queue_head_t wq;
        int wanted_cluster;
        struct completion started;
        bL_switch_completion_handler completer;
        void *completer_cookie;
};

static struct bL_thread bL_threads[NR_CPUS];

static int bL_switcher_thread(void *arg)
{
        struct bL_thread *t = arg;
        int cluster;
        bL_switch_completion_handler completer;
        void *completer_cookie;

        sched_set_fifo_low(current);
        complete(&t->started);

        do {
                if (signal_pending(current))
                        flush_signals(current);
                wait_event_interruptible(t->wq,
                                t->wanted_cluster != -1 ||
                                kthread_should_stop());

                spin_lock(&t->lock);
                cluster = t->wanted_cluster;
                completer = t->completer;
                completer_cookie = t->completer_cookie;
                t->wanted_cluster = -1;
                t->completer = NULL;
                spin_unlock(&t->lock);

                if (cluster != -1) {
                        bL_switch_to(cluster);

                        if (completer)
                                completer(completer_cookie);
                }
        } while (!kthread_should_stop());

        return 0;
}

static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
{
        struct task_struct *task;

        task = kthread_create_on_node(bL_switcher_thread, arg,
                                      cpu_to_node(cpu), "kswitcher_%d", cpu);
        if (!IS_ERR(task)) {
                kthread_bind(task, cpu);
                wake_up_process(task);
        } else
                pr_err("%s failed for CPU %d\n", __func__, cpu);
        return task;
}

/*
 * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
 *      with completion notification via a callback
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 * @completer: switch completion callback.  If non-NULL,
 *      @completer(@completer_cookie) will be called on completion of
 *      the switch, in non-atomic context.
 * @completer_cookie: opaque context argument for @completer.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread.  This function may or may not return
 * before the switch has occurred.
 *
 * If a @completer callback function is supplied, it will be called when
 * the switch is complete.  This can be used to determine asynchronously
 * when the switch is complete, regardless of when bL_switch_request()
 * returns.  When @completer is supplied, no new switch request is permitted
 * for the affected CPU until after the switch is complete, and @completer
 * has returned.
 */
int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
                         bL_switch_completion_handler completer,
                         void *completer_cookie)
{
        struct bL_thread *t;

        if (cpu >= ARRAY_SIZE(bL_threads)) {
                pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
                return -EINVAL;
        }

        t = &bL_threads[cpu];

        if (IS_ERR(t->task))
                return PTR_ERR(t->task);
        if (!t->task)
                return -ESRCH;

        spin_lock(&t->lock);
        if (t->completer) {
                spin_unlock(&t->lock);
                return -EBUSY;
        }
        t->completer = completer;
        t->completer_cookie = completer_cookie;
        t->wanted_cluster = new_cluster_id;
        spin_unlock(&t->lock);
        wake_up(&t->wq);
        return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request_cb);
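
/*
 * An illustrative call site (a sketch only; my_completer() and my_cookie
 * are hypothetical): ask CPU 0 to move to cluster 1 and get notified
 * asynchronously once the migration has actually happened:
 *
 *      ret = bL_switch_request_cb(0, 1, my_completer, my_cookie);
 *
 * A return of -EBUSY means a previous request with a completer is still
 * in flight on that CPU.
 */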

/*
 * Activation and configuration code.
 */

static DEFINE_MUTEX(bL_switcher_activation_lock);
static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
static unsigned int bL_switcher_active;
static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
static cpumask_t bL_switcher_removed_logical_cpus;

int bL_switcher_register_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);

int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);
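
/*
 * Notifier callbacks receive BL_NOTIFY_PRE/POST_ENABLE/DISABLE as the
 * "val" argument.  A minimal client might look like this (a sketch;
 * my_bL_notify() is hypothetical):
 *
 *      static int my_bL_notify(struct notifier_block *nb,
 *                              unsigned long val, void *unused)
 *      {
 *              return (val == BL_NOTIFY_PRE_ENABLE) ? NOTIFY_OK : NOTIFY_DONE;
 *      }
 *      static struct notifier_block my_nb = { .notifier_call = my_bL_notify };
 *
 *      bL_switcher_register_notifier(&my_nb);
 */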

static int bL_activation_notify(unsigned long val)
{
        int ret;

        ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
        if (ret & NOTIFY_STOP_MASK)
                pr_err("%s: notifier chain failed with status 0x%x\n",
                        __func__, ret);
        return notifier_to_errno(ret);
}

static void bL_switcher_restore_cpus(void)
{
        int i;

        for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
                struct device *cpu_dev = get_cpu_device(i);
                int ret = device_online(cpu_dev);
                if (ret)
                        dev_err(cpu_dev, "switcher: unable to restore CPU\n");
        }
}

static int bL_switcher_halve_cpus(void)
{
        int i, j, cluster_0, gic_id, ret;
        unsigned int cpu, cluster, mask;
        cpumask_t available_cpus;

        /* First pass to validate what we have */
        mask = 0;
        for_each_online_cpu(i) {
                cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
                if (cluster >= 2) {
                        pr_err("%s: only dual cluster systems are supported\n", __func__);
                        return -EINVAL;
                }
                if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
                        return -EINVAL;
                mask |= (1 << cluster);
        }
        if (mask != 3) {
                pr_err("%s: no CPU pairing possible\n", __func__);
                return -EINVAL;
        }

        /*
         * Now let's do the pairing.  We match each CPU with another CPU
         * from a different cluster.  To get a uniform scheduling behavior
         * without fiddling with CPU topology and compute capacity data,
         * we'll use logical CPUs initially belonging to the same cluster.
         */
        memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
        cpumask_copy(&available_cpus, cpu_online_mask);
        cluster_0 = -1;
        for_each_cpu(i, &available_cpus) {
                int match = -1;
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
                if (cluster_0 == -1)
                        cluster_0 = cluster;
                if (cluster != cluster_0)
                        continue;
                cpumask_clear_cpu(i, &available_cpus);
                for_each_cpu(j, &available_cpus) {
                        cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
                        /*
                         * Let's remember the last match to deliberately
                         * create "odd" pairings, so that other code never
                         * comes to assume any relation between physical
                         * and logical CPU numbers.
                         */
                        if (cluster != cluster_0)
                                match = j;
                }
                if (match != -1) {
                        bL_switcher_cpu_pairing[i] = match;
                        cpumask_clear_cpu(match, &available_cpus);
                        pr_info("CPU%d paired with CPU%d\n", i, match);
                }
        }
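
        /*
         * Worked example on a hypothetical 2+2 system with logical CPUs
         * 0-1 in cluster 0 and CPUs 2-3 in cluster 1: the "last match"
         * rule above pairs CPU0 with CPU3, then CPU1 with CPU2, while
         * the pairing entries for CPUs 2 and 3 themselves stay at -1.
         */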

        /*
         * Now we disable the unwanted CPUs, i.e. everything that has no
         * pairing information recorded for it.  Note this includes the
         * pairing counterparts, whose own entries were left at -1 above.
         */
        cpumask_clear(&bL_switcher_removed_logical_cpus);
        for_each_online_cpu(i) {
                cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);

                /* Let's take note of the GIC ID for this CPU */
                gic_id = gic_get_cpu_id(i);
                if (gic_id < 0) {
                        pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
                        bL_switcher_restore_cpus();
                        return -EINVAL;
                }
                bL_gic_id[cpu][cluster] = gic_id;
                pr_info("GIC ID for CPU %u cluster %u is %u\n",
                        cpu, cluster, gic_id);

                if (bL_switcher_cpu_pairing[i] != -1) {
                        bL_switcher_cpu_original_cluster[i] = cluster;
                        continue;
                }

                ret = device_offline(get_cpu_device(i));
                if (ret) {
                        bL_switcher_restore_cpus();
                        return ret;
                }
                cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
        }

        return 0;
}

/* Determine the logical CPU a given physical CPU is grouped on. */
int bL_switcher_get_logical_index(u32 mpidr)
{
        int cpu;

        if (!bL_switcher_active)
                return -EUNATCH;

        mpidr &= MPIDR_HWID_BITMASK;
        for_each_online_cpu(cpu) {
                int pairing = bL_switcher_cpu_pairing[cpu];
                if (pairing == -1)
                        continue;
                if ((mpidr == cpu_logical_map(cpu)) ||
                    (mpidr == cpu_logical_map(pairing)))
                        return cpu;
        }
        return -EINVAL;
}
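
/*
 * With the 2+2 pairing example above, both MPIDRs of a pair map to the
 * same logical index: passing either cpu_logical_map(0) or
 * cpu_logical_map(3) here would return 0.
 */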

static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
{
        trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr());
}

int bL_switcher_trace_trigger(void)
{
        preempt_disable();

        bL_switcher_trace_trigger_cpu(NULL);
        smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);

        preempt_enable();

        return 0;
}
EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);

static int bL_switcher_enable(void)
{
        int cpu, ret;

        mutex_lock(&bL_switcher_activation_lock);
        lock_device_hotplug();
        if (bL_switcher_active) {
                unlock_device_hotplug();
                mutex_unlock(&bL_switcher_activation_lock);
                return 0;
        }

        pr_info("big.LITTLE switcher initializing\n");

        ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
        if (ret)
                goto error;

        ret = bL_switcher_halve_cpus();
        if (ret)
                goto error;

        bL_switcher_trace_trigger();

        for_each_online_cpu(cpu) {
                struct bL_thread *t = &bL_threads[cpu];
                spin_lock_init(&t->lock);
                init_waitqueue_head(&t->wq);
                init_completion(&t->started);
                t->wanted_cluster = -1;
                t->task = bL_switcher_thread_create(cpu, t);
        }

        bL_switcher_active = 1;
        bL_activation_notify(BL_NOTIFY_POST_ENABLE);
        pr_info("big.LITTLE switcher initialized\n");
        goto out;

error:
        pr_warn("big.LITTLE switcher initialization failed\n");
        bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
        unlock_device_hotplug();
        mutex_unlock(&bL_switcher_activation_lock);
        return ret;
}

#ifdef CONFIG_SYSFS

static void bL_switcher_disable(void)
{
        unsigned int cpu, cluster;
        struct bL_thread *t;
        struct task_struct *task;

        mutex_lock(&bL_switcher_activation_lock);
        lock_device_hotplug();

        if (!bL_switcher_active)
                goto out;

        if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
                bL_activation_notify(BL_NOTIFY_POST_ENABLE);
                goto out;
        }

        bL_switcher_active = 0;

        /*
         * To deactivate the switcher, we must shut down the switcher
         * threads to prevent any other requests from being accepted.
         * Then, if the final cluster for a given logical CPU is not the
         * same as the original one, we'll recreate a switcher thread
         * just for the purpose of switching the CPU back without any
         * possibility for interference from external requests.
         */
        for_each_online_cpu(cpu) {
                t = &bL_threads[cpu];
                task = t->task;
                t->task = NULL;
                if (!task || IS_ERR(task))
                        continue;
                kthread_stop(task);
                /* no more switch may happen on this CPU at this point */
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
                if (cluster == bL_switcher_cpu_original_cluster[cpu])
                        continue;
                init_completion(&t->started);
                t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
                task = bL_switcher_thread_create(cpu, t);
                if (!IS_ERR(task)) {
                        wait_for_completion(&t->started);
                        kthread_stop(task);
                        cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
                        if (cluster == bL_switcher_cpu_original_cluster[cpu])
                                continue;
                }
                /* If execution gets here, we're in trouble. */
                pr_crit("%s: unable to restore original cluster for CPU %d\n",
                        __func__, cpu);
                pr_crit("%s: CPU %d can't be restored\n",
                        __func__, bL_switcher_cpu_pairing[cpu]);
                cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
                                  &bL_switcher_removed_logical_cpus);
        }

        bL_switcher_restore_cpus();
        bL_switcher_trace_trigger();

        bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
        unlock_device_hotplug();
        mutex_unlock(&bL_switcher_activation_lock);
}

static ssize_t bL_switcher_active_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", bL_switcher_active);
}

static ssize_t bL_switcher_active_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        int ret;

        switch (buf[0]) {
        case '0':
                bL_switcher_disable();
                ret = 0;
                break;
        case '1':
                ret = bL_switcher_enable();
                break;
        default:
                ret = -EINVAL;
        }

        return (ret >= 0) ? count : ret;
}

static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        int ret = bL_switcher_trace_trigger();

        return ret ? ret : count;
}

static struct kobj_attribute bL_switcher_active_attr =
        __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);

static struct kobj_attribute bL_switcher_trace_trigger_attr =
        __ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);

static struct attribute *bL_switcher_attrs[] = {
        &bL_switcher_active_attr.attr,
        &bL_switcher_trace_trigger_attr.attr,
        NULL,
};

static struct attribute_group bL_switcher_attr_group = {
        .attrs = bL_switcher_attrs,
};

static struct kobject *bL_switcher_kobj;

static int __init bL_switcher_sysfs_init(void)
{
        int ret;

        bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
        if (!bL_switcher_kobj)
                return -ENOMEM;
        ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
        if (ret)
                kobject_put(bL_switcher_kobj);
        return ret;
}
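
/*
 * The group above appears as /sys/kernel/bL_switcher/ at runtime:
 *
 *      echo 0 > /sys/kernel/bL_switcher/active         # disable the switcher
 *      echo 1 > /sys/kernel/bL_switcher/active         # (re)enable it
 *      echo 1 > /sys/kernel/bL_switcher/trace_trigger  # re-emit trace state
 */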

#endif  /* CONFIG_SYSFS */

bool bL_switcher_get_enabled(void)
{
        mutex_lock(&bL_switcher_activation_lock);

        return bL_switcher_active;
}
EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);

void bL_switcher_put_enabled(void)
{
        mutex_unlock(&bL_switcher_activation_lock);
}
EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);

/*
 * Veto any CPU hotplug operation on those CPUs we've removed
 * while the switcher is active.
 * We're just not ready to deal with that given the trickery involved.
 */
static int bL_switcher_cpu_pre(unsigned int cpu)
{
        int pairing;

        if (!bL_switcher_active)
                return 0;

        pairing = bL_switcher_cpu_pairing[cpu];

        if (pairing == -1)
                return -EINVAL;
        return 0;
}

static bool no_bL_switcher;
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
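
/*
 * The core_param() above makes this a bare kernel command-line flag:
 * booting with "no_bL_switcher" (or "no_bL_switcher=1") skips the
 * automatic bL_switcher_enable() below, while the switcher can still be
 * enabled later through the sysfs "active" file.
 */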

static int __init bL_switcher_init(void)
{
        int ret;

        if (!mcpm_is_available())
                return -ENODEV;

        cpuhp_setup_state_nocalls(CPUHP_ARM_BL_PREPARE, "arm/bl:prepare",
                                  bL_switcher_cpu_pre, NULL);
        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/bl:predown",
                                        NULL, bL_switcher_cpu_pre);
        if (ret < 0) {
                cpuhp_remove_state_nocalls(CPUHP_ARM_BL_PREPARE);
                pr_err("bL_switcher: Failed to allocate a hotplug state\n");
                return ret;
        }
        if (!no_bL_switcher) {
                ret = bL_switcher_enable();
                if (ret)
                        return ret;
        }

#ifdef CONFIG_SYSFS
        ret = bL_switcher_sysfs_init();
        if (ret)
                pr_err("%s: unable to create sysfs entry\n", __func__);
#endif

        return 0;
}

late_initcall(bL_switcher_init);