linux/arch/powerpc/mm/mmu_context_hash64.c
<<
>>
Prefs
   1/*
   2 *  MMU context allocation for 64-bit kernels.
   3 *
   4 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
   5 *
   6 *  This program is free software; you can redistribute it and/or
   7 *  modify it under the terms of the GNU General Public License
   8 *  as published by the Free Software Foundation; either version
   9 *  2 of the License, or (at your option) any later version.
  10 *
  11 */
  12
  13#include <linux/sched.h>
  14#include <linux/kernel.h>
  15#include <linux/errno.h>
  16#include <linux/string.h>
  17#include <linux/types.h>
  18#include <linux/mm.h>
  19#include <linux/spinlock.h>
  20#include <linux/idr.h>
  21#include <linux/export.h>
  22#include <linux/gfp.h>
  23#include <linux/slab.h>
  24
  25#include <asm/mmu_context.h>
  26
  27#include "icswx.h"
  28
  29static DEFINE_SPINLOCK(mmu_context_lock);
  30static DEFINE_IDA(mmu_context_ida);
  31
  32/*
  33 * 256MB segment
   34 * The proto-VSID space has 2^(CONTEXT_BITS + USER_ESID_BITS) - 1 segments
  35 * available for user mappings. Each segment contains 2^28 bytes. Each
  36 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
  37 * (19 == 37 + 28 - 46).
  38 */
  39#define MAX_CONTEXT     ((1UL << CONTEXT_BITS) - 1)
  40
  41int __init_new_context(void)
  42{
  43        int index;
  44        int err;
  45
  46again:
  47        if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
  48                return -ENOMEM;
  49
  50        spin_lock(&mmu_context_lock);
  51        err = ida_get_new_above(&mmu_context_ida, 1, &index);
  52        spin_unlock(&mmu_context_lock);
  53
  54        if (err == -EAGAIN)
  55                goto again;
  56        else if (err)
  57                return err;
  58
  59        if (index > MAX_CONTEXT) {
  60                spin_lock(&mmu_context_lock);
  61                ida_remove(&mmu_context_ida, index);
  62                spin_unlock(&mmu_context_lock);
  63                return -ENOMEM;
  64        }
  65
  66        return index;
  67}
  68EXPORT_SYMBOL_GPL(__init_new_context);
  69
  70int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
  71{
  72        int index;
  73
  74        index = __init_new_context();
  75        if (index < 0)
  76                return index;
  77
  78        /* The old code would re-promote on fork, we don't do that
  79         * when using slices as it could cause problem promoting slices
  80         * that have been forced down to 4K
  81         */
  82        if (slice_mm_new_context(mm))
  83                slice_set_user_psize(mm, mmu_virtual_psize);
  84        subpage_prot_init_new_context(mm);
  85        mm->context.id = index;
  86#ifdef CONFIG_PPC_ICSWX
  87        mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
  88        if (!mm->context.cop_lockp) {
  89                __destroy_context(index);
  90                subpage_prot_free(mm);
  91                mm->context.id = MMU_NO_CONTEXT;
  92                return -ENOMEM;
  93        }
  94        spin_lock_init(mm->context.cop_lockp);
  95#endif /* CONFIG_PPC_ICSWX */
  96
  97        return 0;
  98}
  99
/*
 * Return a context id obtained from __init_new_context() to the IDA.
 * mmu_context_lock is taken because the IDA calls here are not
 * internally locked.
 */
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);
 107
/*
 * Tear down the MMU context state of a dying mm: release the icswx
 * coprocessor state and its lock (when configured), free the context
 * id back to the allocator, drop the subpage-protection tables and
 * mark the mm as having no context.
 */
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_ICSWX
	/* drop_cop() presumably detaches any acquired coprocessor types
	 * before the lock they are guarded by is freed — see icswx.h. */
	drop_cop(mm->context.acop, mm);
	kfree(mm->context.cop_lockp);
	mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */
	__destroy_context(mm->context.id);
	subpage_prot_free(mm);
	mm->context.id = MMU_NO_CONTEXT;
}
 119