/* Copyright (C) 2009 Red Hat, Inc.
 *
 * See ../COPYING for licensing terms.
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/export.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>

/*
 * use_mm
 *	Makes the calling kernel thread take on the specified
 *	mm context.
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
void use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	task_lock(tsk);
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		atomic_inc(&mm->mm_count);
		tsk->active_mm = mm;
	}
	tsk->mm = mm;
	switch_mm(active_mm, mm, tsk);
	task_unlock(tsk);

	if (active_mm != mm)
		mmdrop(active_mm);
}
EXPORT_SYMBOL_GPL(use_mm);

/*
 * unuse_mm
 *	Reverses the effect of use_mm, i.e. releases the
 *	specified mm context which was earlier taken on
 *	by the calling kernel thread
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
void unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);
	sync_mm_rss(mm);
	tsk->mm = NULL;
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	task_unlock(tsk);
}
EXPORT_SYMBOL_GPL(unuse_mm);
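
/*
 * Illustrative sketch (not part of the original file): a typical pairing
 * of use_mm()/unuse_mm() by a kernel thread that needs to touch user
 * memory on behalf of a task, in the style of the aio and vhost workers.
 * The helper name example_worker(), its argument layout, and the
 * assumption that the caller already holds a reference on 'mm'
 * (e.g. via get_task_mm(), released later with mmput()) are hypothetical;
 * <linux/uaccess.h> would be needed for copy_to_user().
 */
#if 0	/* example only, not built */
static void example_worker(struct mm_struct *mm,
			   void __user *ubuf, const void *kbuf, size_t len)
{
	use_mm(mm);	/* adopt the user address space */

	if (copy_to_user(ubuf, kbuf, len))
		pr_warn("example_worker: copy_to_user() failed\n");

	/*
	 * Drop tsk->mm back to NULL; active_mm stays 'mm' lazily until
	 * the next context switch.  The caller's own mm reference is
	 * unaffected and must still be released by the caller.
	 */
	unuse_mm(mm);
}
#endif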