linux/arch/powerpc/platforms/85xx/smp.c
/*
 * Author: Andy Fleming <afleming@freescale.com>
 *         Kumar Gala <galak@kernel.crashing.org>
 *
 * Copyright 2006-2008, 2011-2012 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
#include <linux/cpu.h>

#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/fsl_guts.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
#include "smp.h"

struct epapr_spin_table {
        u32     addr_h;
        u32     addr_l;
        u32     r3_h;
        u32     r3_l;
        u32     reserved;
        u32     pir;
};
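
/*
 * Per ePAPR, the boot program parks each secondary core on a spin table
 * like the one above, with addr_l initialized to 1.  A rough sketch of
 * the boot program's side of the protocol (illustrative only, not code
 * from any particular boot program; branch_to() is a hypothetical
 * stand-in for its jump):
 *
 *	while (table->addr_l == 1)
 *		;	// spin until the kernel writes a release address
 *	r3 = ((u64)table->r3_h << 32) | table->r3_l;
 *	branch_to(((u64)table->addr_h << 32) | table->addr_l);
 */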

static struct ccsr_guts __iomem *guts;
static u64 timebase;
static int tb_req;
static int tb_valid;

static void mpc85xx_timebase_freeze(int freeze)
{
        uint32_t mask;

        mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
        if (freeze)
                setbits32(&guts->devdisr, mask);
        else
                clrbits32(&guts->devdisr, mask);

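        /* Read DEVDISR back to ensure the update has taken effect. */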
        in_be32(&guts->devdisr);
}

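/*
 * Timebase handoff between the boot cpu (give) and a new cpu (take),
 * summarizing the two functions below:
 *
 *	take: sets tb_req, then spins until tb_valid is set
 *	give: sees tb_req, freezes the timebase, saves it in 'timebase',
 *	      sets tb_valid, then spins while tb_valid stays set
 *	take: copies 'timebase' into its own TB, clears tb_valid
 *	give: sees tb_valid cleared and unfreezes the timebase
 */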
static void mpc85xx_give_timebase(void)
{
        unsigned long flags;

        local_irq_save(flags);

        while (!tb_req)
                barrier();
        tb_req = 0;

        mpc85xx_timebase_freeze(1);
#ifdef CONFIG_PPC64
        /*
         * e5500/e6500 have a workaround for erratum A-006958 in place
         * that will reread the timebase until TBL is non-zero.
         * That would be a bad thing when the timebase is frozen.
         *
         * Thus, we read it manually, and instead of checking that
         * TBL is non-zero, we ensure that TB does not change.  We don't
         * do that for the main mftb implementation, because it requires
         * a scratch register.
         */
        {
                u64 prev;

                asm volatile("mfspr %0, %1" : "=r" (timebase) :
                             "i" (SPRN_TBRL));

                do {
                        prev = timebase;
                        asm volatile("mfspr %0, %1" : "=r" (timebase) :
                                     "i" (SPRN_TBRL));
                } while (prev != timebase);
        }
#else
        timebase = get_tb();
#endif
        mb();
        tb_valid = 1;

        while (tb_valid)
                barrier();

        mpc85xx_timebase_freeze(0);

        local_irq_restore(flags);
}

static void mpc85xx_take_timebase(void)
{
        unsigned long flags;

        local_irq_save(flags);

        tb_req = 1;
        while (!tb_valid)
                barrier();

        set_tb(timebase >> 32, timebase & 0xffffffff);
        isync();
        tb_valid = 0;

        local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
static void smp_85xx_mach_cpu_die(void)
{
        unsigned int cpu = smp_processor_id();
        u32 tmp;

        local_irq_disable();
        idle_task_exit();
        generic_set_cpu_dead(cpu);
        mb();

        mtspr(SPRN_TCR, 0);

        __flush_disable_L1();
        tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
        mtspr(SPRN_HID0, tmp);
        isync();

        /* Enter NAP mode. */
        tmp = mfmsr();
        tmp |= MSR_WE;
        mb();
        mtmsr(tmp);
        isync();

        while (1)
                ;
}
#endif

static inline void flush_spin_table(void *spin_table)
{
        flush_dcache_range((ulong)spin_table,
                (ulong)spin_table + sizeof(struct epapr_spin_table));
}

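/*
 * Flush (and thereby invalidate) our cached copy before reading, in
 * case the boot program uses a cache-inhibited spin table.
 */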
static inline u32 read_spin_table_addr_l(void *spin_table)
{
        flush_dcache_range((ulong)spin_table,
                (ulong)spin_table + sizeof(struct epapr_spin_table));
        return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
}

#ifdef CONFIG_PPC64
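/*
 * On e6500, thread 1 of a core is started by loading its initial MSR
 * and NIA into the thread management registers (IMSR1/INIA1) and then
 * setting its enable bit in TENS.  This must run on the core's primary
 * thread, which is why smp_85xx_kick_cpu() invokes it via
 * smp_call_function_single().
 */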
static void wake_hw_thread(void *info)
{
        void fsl_secondary_thread_init(void);
        unsigned long imsr1, inia1;
        int nr = *(const int *)info;

        imsr1 = MSR_KERNEL;
        inia1 = *(unsigned long *)fsl_secondary_thread_init;

        mttmr(TMRN_IMSR1, imsr1);
        mttmr(TMRN_INIA1, inia1);
        mtspr(SPRN_TENS, TEN_THREAD(1));

        smp_generic_kick_cpu(nr);
}
#endif

static int smp_85xx_kick_cpu(int nr)
{
        unsigned long flags;
        const u64 *cpu_rel_addr;
        struct epapr_spin_table __iomem *spin_table;
        struct device_node *np;
        int hw_cpu = get_hard_smp_processor_id(nr);
        int ioremappable;
        int ret = 0;

        WARN_ON(nr < 0 || nr >= NR_CPUS);
        WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);

        pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

#ifdef CONFIG_PPC64
        /* Threads don't use the spin table */
        if (cpu_thread_in_core(nr) != 0) {
                int primary = cpu_first_thread_sibling(nr);

                if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
                        return -ENOENT;

                if (cpu_thread_in_core(nr) != 1) {
                        pr_err("%s: cpu %d: invalid hw thread %d\n",
                               __func__, nr, cpu_thread_in_core(nr));
                        return -ENOENT;
                }

                if (!cpu_online(primary)) {
                        pr_err("%s: cpu %d: primary %d not online\n",
                               __func__, nr, primary);
                        return -ENOENT;
                }

                smp_call_function_single(primary, wake_hw_thread, &nr, 0);
                return 0;
        }
#endif

        np = of_get_cpu_node(nr, NULL);
        cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);

        if (cpu_rel_addr == NULL) {
                printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
                return -ENOENT;
        }
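
        /*
         * Illustrative device tree fragment providing the property read
         * above (example values only, not from any real board):
         *
         *	cpu@1 {
         *		device_type = "cpu";
         *		reg = <1>;
         *		cpu-release-addr = <0 0xbff00f40>;
         *	};
         */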

        /*
         * A secondary core could be in a spinloop in the bootpage
         * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
         * The bootpage and highmem can be accessed via ioremap(), but
         * we need to directly access the spinloop if it's in lowmem.
         */
        ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

        /* Map the spin table */
        if (ioremappable)
                spin_table = ioremap_prot(*cpu_rel_addr,
                        sizeof(struct epapr_spin_table), _PAGE_COHERENT);
        else
                spin_table = phys_to_virt(*cpu_rel_addr);

        local_irq_save(flags);
#ifdef CONFIG_PPC32
#ifdef CONFIG_HOTPLUG_CPU
        /* Corresponding to generic_set_cpu_dead() */
        generic_set_cpu_up(nr);

        if (system_state == SYSTEM_RUNNING) {
                /*
                 * To stay compatible with old boot programs which use a
                 * cache-inhibited spin table, we need to flush the cache
                 * before accessing the spin table to invalidate any stale
                 * data.  We also need to flush the cache after writing to
                 * the spin table to push the data out.
                 */
                flush_spin_table(spin_table);
                out_be32(&spin_table->addr_l, 0);
                flush_spin_table(spin_table);

                /*
                 * We don't set the BPTR register here since it already points
                 * to the boot page properly.
                 */
                mpic_reset_core(nr);

                /*
                 * wait until core is ready...
                 * We need to invalidate the stale data, in case the boot
                 * loader uses a cache-inhibited spin table.
                 */
                if (!spin_event_timeout(
                                read_spin_table_addr_l(spin_table) == 1,
                                10000, 100)) {
                        pr_err("%s: timeout waiting for core %d to reset\n",
                               __func__, hw_cpu);
                        ret = -ENOENT;
                        goto out;
                }

                /* clear the acknowledge status */
                __secondary_hold_acknowledge = -1;
        }
#endif
        flush_spin_table(spin_table);
        out_be32(&spin_table->pir, hw_cpu);
        out_be32(&spin_table->addr_l, __pa(__early_start));
        flush_spin_table(spin_table);

        /* Wait a bit for the CPU to ack. */
        if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
                                10000, 100)) {
                pr_err("%s: timeout waiting for core %d to ack\n",
                       __func__, hw_cpu);
                ret = -ENOENT;
                goto out;
        }
out:
#else
        smp_generic_kick_cpu(nr);

        flush_spin_table(spin_table);
        out_be32(&spin_table->pir, hw_cpu);
        out_be64((u64 *)(&spin_table->addr_h),
                __pa(ppc_function_entry(generic_secondary_smp_init)));
        flush_spin_table(spin_table);
#endif

        local_irq_restore(flags);

        if (ioremappable)
                iounmap(spin_table);

        return ret;
}

struct smp_ops_t smp_85xx_ops = {
        .kick_cpu = smp_85xx_kick_cpu,
        .cpu_bootable = smp_generic_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_disable    = generic_cpu_disable,
        .cpu_die        = generic_cpu_die,
#endif
#ifdef CONFIG_KEXEC
        .give_timebase  = smp_generic_give_timebase,
        .take_timebase  = smp_generic_take_timebase,
#endif
};

#ifdef CONFIG_KEXEC
atomic_t kexec_down_cpus = ATOMIC_INIT(0);

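/*
 * Secondary cpus increment kexec_down_cpus and then park in an infinite
 * loop here; mpc85xx_smp_machine_kexec() below waits for the count to
 * reach num_cpus - 1 before resetting the cores via the MPIC.
 */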
void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
        local_irq_disable();

        if (secondary) {
                atomic_inc(&kexec_down_cpus);
                /* loop forever */
                while (1);
        }
}

static void mpc85xx_smp_kexec_down(void *arg)
{
        if (ppc_md.kexec_cpu_down)
                ppc_md.kexec_cpu_down(0, 1);
}

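/*
 * Flush one physical page through a temporary kernel mapping; kmap()
 * is needed because the page may live in highmem and thus have no
 * permanent lowmem mapping.
 */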
static void map_and_flush(unsigned long paddr)
{
        struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
        unsigned long kaddr = (unsigned long)kmap(page);

        flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
        kunmap(page);
}

/*
 * Before we reset the other cores, we need to flush the relevant cache
 * out to memory so we don't get anything corrupted.  Some of these
 * flushes are performed out of an overabundance of caution, as
 * interrupts are not disabled yet and we can switch cores.
 */
static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        unsigned long paddr;
        int i;

        if (image->type == KEXEC_TYPE_DEFAULT) {
                /* normal kexec images are stored in temporary pages */
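                /*
                 * image->head is a list of tagged physical addresses:
                 * IND_INDIRECTION entries point at the next page of
                 * entries, IND_DONE terminates the list, and
                 * IND_DESTINATION entries name copy targets.  Only the
                 * non-destination entries reference pages whose contents
                 * need flushing here.
                 */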
                for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
                     ptr = (entry & IND_INDIRECTION) ?
                                phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
                        if (!(entry & IND_DESTINATION)) {
                                map_and_flush(entry);
                        }
                }
                /* flush out last IND_DONE page */
                map_and_flush(entry);
        } else {
                /* crash type kexec images are copied to the crash region */
                for (i = 0; i < image->nr_segments; i++) {
                        struct kexec_segment *seg = &image->segment[i];
                        for (paddr = seg->mem; paddr < seg->mem + seg->memsz;
                             paddr += PAGE_SIZE) {
                                map_and_flush(paddr);
                        }
                }
        }

        /* also flush the kimage struct to be passed in as well */
        flush_dcache_range((unsigned long)image,
                           (unsigned long)image + sizeof(*image));
}

static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
        int timeout = INT_MAX;
        int i, num_cpus = num_present_cpus();

        mpc85xx_smp_flush_dcache_kexec(image);

        if (image->type == KEXEC_TYPE_DEFAULT)
                smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);

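        /* Crude bounded busy-wait for the secondaries to park themselves. */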
        while ((atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
               (timeout > 0))
                timeout--;

        if (!timeout)
                printk(KERN_ERR "Unable to bring down secondary cpu(s)\n");

        for_each_online_cpu(i) {
                if (i == smp_processor_id())
                        continue;
                mpic_reset_core(i);
        }

        default_machine_kexec(image);
}
#endif /* CONFIG_KEXEC */

static void smp_85xx_basic_setup(int cpu_nr)
{
        if (cpu_has_feature(CPU_FTR_DBELL))
                doorbell_setup_this_cpu();
}

static void smp_85xx_setup_cpu(int cpu_nr)
{
        mpic_setup_this_cpu();
        smp_85xx_basic_setup(cpu_nr);
}

static const struct of_device_id mpc85xx_smp_guts_ids[] = {
        { .compatible = "fsl,mpc8572-guts", },
        { .compatible = "fsl,p1020-guts", },
        { .compatible = "fsl,p1021-guts", },
        { .compatible = "fsl,p1022-guts", },
        { .compatible = "fsl,p1023-guts", },
        { .compatible = "fsl,p2020-guts", },
        {},
};

void __init mpc85xx_smp_init(void)
{
        struct device_node *np;

        np = of_find_node_by_type(NULL, "open-pic");
        if (np) {
                smp_85xx_ops.probe = smp_mpic_probe;
                smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
                smp_85xx_ops.message_pass = smp_mpic_message_pass;
        } else {
                smp_85xx_ops.setup_cpu = smp_85xx_basic_setup;
        }

        if (cpu_has_feature(CPU_FTR_DBELL)) {
                /*
                 * If left NULL, .message_pass defaults to
                 * smp_muxed_ipi_message_pass
                 */
                smp_85xx_ops.message_pass = NULL;
                smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
                smp_85xx_ops.probe = NULL;
        }

        np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
        if (np) {
                guts = of_iomap(np, 0);
                of_node_put(np);
                if (!guts) {
                        pr_err("%s: Could not map guts node address\n",
                               __func__);
                        return;
                }
                smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
                smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
#ifdef CONFIG_HOTPLUG_CPU
                ppc_md.cpu_die = smp_85xx_mach_cpu_die;
#endif
        }

        smp_ops = &smp_85xx_ops;

#ifdef CONFIG_KEXEC
        ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
        ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif
}