linux/arch/powerpc/mm/stab.c
/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/memblock.h>

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/prom.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>
#include <asm/iseries/hv_call.h>

struct stab_entry {
        unsigned long esid_data;
        unsigned long vsid_data;
};

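/*
 * Per-cpu record of the STEs created for user addresses since the last
 * context switch.  switch_stab() uses it to invalidate just those entries
 * instead of walking the whole table; if more than NR_STAB_CACHE_ENTRIES
 * are made, stab_cache_ptr is left past the end as an "overflowed" marker
 * and the slow full scan is used instead.
 */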
#define NR_STAB_CACHE_ENTRIES 8
static DEFINE_PER_CPU(long, stab_cache_ptr);
static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache);

/*
 * Create a segment table entry for the given esid/vsid pair.
 */
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
        unsigned long esid_data, vsid_data;
        unsigned long entry, group, old_esid, castout_entry, i;
        unsigned int global_entry;
        struct stab_entry *ste, *castout_ste;
        unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;

        vsid_data = vsid << STE_VSID_SHIFT;
        esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
        if (!kernel_segment)
                esid_data |= STE_ESID_KS;

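        /*
         * The segment table is one HW_PAGE_SIZE page of 16-byte STEs
         * (256 entries with 4K hardware pages), arranged as 32 groups of 8.
         * The low 5 bits of the ESID select the primary group
         * ((esid & 0x1f) << 7 is that group's byte offset); the complement
         * of those bits selects the secondary group.  global_entry is the
         * index of the group's first entry within the whole table.
         */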
        /* Search the primary group first. */
        global_entry = (esid & 0x1f) << 3;
        ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));

        /* Find an empty entry, if one exists. */
        for (group = 0; group < 2; group++) {
                for (entry = 0; entry < 8; entry++, ste++) {
                        if (!(ste->esid_data & STE_ESID_V)) {
                                ste->vsid_data = vsid_data;
                                eieio();
                                ste->esid_data = esid_data;
                                return (global_entry | entry);
                        }
                }
                /* Now search the secondary group. */
                global_entry = ((~esid) & 0x1f) << 3;
                ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
        }

        /*
         * Could not find an empty entry; pick one using round-robin
         * selection, searching all entries in both groups.
         */
        castout_entry = get_paca()->stab_rr;
        for (i = 0; i < 16; i++) {
                if (castout_entry < 8) {
                        global_entry = (esid & 0x1f) << 3;
                        ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
                        castout_ste = ste + castout_entry;
                } else {
                        global_entry = ((~esid) & 0x1f) << 3;
                        ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
                        castout_ste = ste + (castout_entry - 8);
                }

                /* Don't cast out the first kernel segment */
                if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
                        break;

                castout_entry = (castout_entry + 1) & 0xf;
        }

        get_paca()->stab_rr = (castout_entry + 1) & 0xf;

        /* Modify the old entry to the new value. */

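        /*
         * The update is ordered so the hardware never walks a half-written
         * STE: invalidate the old ESID first, then install the new VSID,
         * and only then write the new (valid) ESID.  The slbie below throws
         * away any translation still cached for the old segment.
         */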
        /* Force previous translations to complete. DRENG */
        asm volatile("isync" : : : "memory");

        old_esid = castout_ste->esid_data >> SID_SHIFT;
        castout_ste->esid_data = 0;             /* Invalidate old entry */

        asm volatile("sync" : : : "memory");    /* Order update */

        castout_ste->vsid_data = vsid_data;
        eieio();                                /* Order update */
        castout_ste->esid_data = esid_data;

        asm volatile("slbie  %0" : : "r" (old_esid << SID_SHIFT));
        /* Ensure completion of slbie */
        asm volatile("sync" : : : "memory");

        return (global_entry | (castout_entry & 0x7));
}

/*
 * Allocate a segment table entry for the given ea and mm
 */
static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
{
        unsigned long vsid;
        unsigned char stab_entry;
        unsigned long offset;

        /* Kernel or user address? */
        if (is_kernel_addr(ea)) {
                vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
        } else {
                if ((ea >= TASK_SIZE_USER64) || (!mm))
                        return 1;

                vsid = get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M);
        }

        stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);

        if (!is_kernel_addr(ea)) {
                offset = __get_cpu_var(stab_cache_ptr);
                if (offset < NR_STAB_CACHE_ENTRIES)
                        __get_cpu_var(stab_cache[offset++]) = stab_entry;
                else
                        offset = NR_STAB_CACHE_ENTRIES + 1;
                __get_cpu_var(stab_cache_ptr) = offset;
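                /*
                 * A value above NR_STAB_CACHE_ENTRIES marks the cache as
                 * overflowed; switch_stab() then scans the whole table
                 * instead of only the cached slots.
                 */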

                /* Order update */
                asm volatile("sync":::"memory");
        }

        return 0;
}

int ste_allocate(unsigned long ea)
{
        return __ste_allocate(ea, current->mm);
}

/*
 * Do the segment table work for a context switch: flush all user
 * entries from the table, then preload some probably useful entries
 * for the new task
 */
void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
{
        struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
        struct stab_entry *ste;
        unsigned long offset;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;

        /* Force previous translations to complete. DRENG */
        asm volatile("isync" : : : "memory");

        /*
         * We need interrupts hard-disabled here, not just soft-disabled,
         * so that a PMU interrupt can't occur, which might try to access
         * user memory (to get a stack trace) and possibly cause a STAB miss
         * which would update the stab_cache/stab_cache_ptr per-cpu variables.
         */
        hard_irq_disable();

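        /*
         * Fast path: only the user STEs recorded in stab_cache need to be
         * invalidated.  If the cache overflowed (stab_cache_ptr was pushed
         * past NR_STAB_CACHE_ENTRIES), fall back to scanning every entry
         * and clearing all non-kernel segments.
         */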
        offset = __get_cpu_var(stab_cache_ptr);
        if (offset <= NR_STAB_CACHE_ENTRIES) {
                int i;

                for (i = 0; i < offset; i++) {
                        ste = stab + __get_cpu_var(stab_cache[i]);
                        ste->esid_data = 0; /* invalidate entry */
                }
        } else {
                unsigned long entry;

                /* Invalidate all entries. */
                ste = stab;

                /* Never flush the first entry. */
                ste += 1;
                for (entry = 1;
                     entry < (HW_PAGE_SIZE / sizeof(struct stab_entry));
                     entry++, ste++) {
                        unsigned long ea;
                        ea = ste->esid_data & ESID_MASK;
                        if (!is_kernel_addr(ea)) {
                                ste->esid_data = 0;
                        }
                }
        }

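        /*
         * sync makes the STE invalidations visible; slbia then flushes any
         * segment translations the CPU may still have cached before the new
         * task starts running.
         */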
        asm volatile("sync; slbia; sync":::"memory");

        __get_cpu_var(stab_cache_ptr) = 0;

        /* Now preload some entries for the new task */
        if (test_tsk_thread_flag(tsk, TIF_32BIT))
                unmapped_base = TASK_UNMAPPED_BASE_USER32;
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;

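        /*
         * The program counter, the stack, and the mmap base
         * (TASK_UNMAPPED_BASE) are the segments the new task is most likely
         * to touch first; entries that share an ESID with an earlier one
         * are skipped.
         */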
        __ste_allocate(pc, mm);

        if (GET_ESID(pc) == GET_ESID(stack))
                return;

        __ste_allocate(stack, mm);

        if ((GET_ESID(pc) == GET_ESID(unmapped_base))
            || (GET_ESID(stack) == GET_ESID(unmapped_base)))
                return;

        __ste_allocate(unmapped_base, mm);

        /* Order update */
        asm volatile("sync" : : : "memory");
}

/*
 * Allocate segment tables for secondary CPUs.  These must all go in
 * the first (bolted) segment, so that do_stab_bolted won't get a
 * recursive segment miss on the segment table itself.
 */
void __init stabs_alloc(void)
{
        int cpu;

        if (mmu_has_feature(MMU_FTR_SLB))
                return;

        for_each_possible_cpu(cpu) {
                unsigned long newstab;

                if (cpu == 0)
                        continue; /* stab for CPU 0 is statically allocated */

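                /*
                 * Limit the allocation to below 1 << SID_SHIFT (256MB) so
                 * the table itself lives in the bolted first kernel segment,
                 * as the comment above requires.
                 */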
                newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
                                              1 << SID_SHIFT);
                newstab = (unsigned long)__va(newstab);

                memset((void *)newstab, 0, HW_PAGE_SIZE);

                paca[cpu].stab_addr = newstab;
                paca[cpu].stab_real = virt_to_abs(newstab);
                printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
                       "virtual, 0x%llx absolute\n",
                       cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
        }
}


/*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB.  All other segment table or SLB
 * entries are faulted in.
 */
void stab_initialize(unsigned long stab)
{
        unsigned long vsid = get_kernel_vsid(PAGE_OFFSET, MMU_SEGSIZE_256M);
        unsigned long stabreal;

        asm volatile("isync; slbia; isync":::"memory");
        make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);

        /* Order update */
        asm volatile("sync":::"memory");

        /* Set ASR */
        stabreal = get_paca()->stab_real | 0x1ul;

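        /*
         * The ASR holds the real address of the segment table; the low bit
         * is its valid bit, hence the | 0x1ul above.  On legacy iSeries the
         * hypervisor owns the ASR, so it has to be set via an HvCall;
         * elsewhere the SPR is written directly.
         */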
#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                HvCall1(HvCallBaseSetASR, stabreal);
                return;
        }
#endif /* CONFIG_PPC_ISERIES */

        mtspr(SPRN_ASR, stabreal);
}