/*
 * mm/thrash.c
 *
 * Copyright (C) 2004, Red Hat, Inc.
 * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
 * Released under the GPL, see the file COPYING for details.
 *
 * Simple token-based thrashing protection, using the algorithm
 * described in: http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/abs05-1.html
 *
 * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
 * Improved the token-passing algorithm: each task has a priority,
 * which is incremented when the task contends for the token at a
 * shorter interval than on its previous attempt. When a task
 * acquires the token, its priority is boosted to keep the token
 * from bouncing around too often and to let the task make some
 * progress in its execution.
 */
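
/*
 * A worked example of the rules above (illustrative numbers): if a
 * task last contended 100 global faults ago and contends again after
 * only 40, its interval shrank, so its token_priority is incremented.
 * If that wins it the token, replace_token adds 2 more, and every
 * fault it takes while holding the token adds another 2. Against
 * that, the holder's priority is halved at most once every
 * TOKEN_AGING_INTERVAL global faults, so a sleeping holder
 * eventually loses the token.
 */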

#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/memcontrol.h>

#include <trace/events/vmscan.h>

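/*
 * Age (halve) the token holder's priority at most once per
 * TOKEN_AGING_INTERVAL (0xFF == 255) global faults.
 */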
#define TOKEN_AGING_INTERVAL    (0xFF)

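/*
 * swap_token_mm points to the mm_struct currently holding the token;
 * swap_token_memcg caches that mm's memory cgroup. Both are updated
 * under swap_token_lock and read locklessly on fast paths such as
 * has_swap_token().
 */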
static DEFINE_SPINLOCK(swap_token_lock);
struct mm_struct *swap_token_mm;
struct mem_cgroup *swap_token_memcg;

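/*
 * The token code uses the memcg pointer only for identity comparison,
 * so the reference taken by try_get_mem_cgroup_from_mm() below is
 * dropped again immediately.
 */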
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
{
        struct mem_cgroup *memcg;

        memcg = try_get_mem_cgroup_from_mm(mm);
        if (memcg)
                css_put(mem_cgroup_css(memcg));

        return memcg;
}
#else
static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
{
        return NULL;
}
#endif

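/*
 * Called from the swapin page-fault path (do_swap_page()) when the
 * faulting page is not in the swap cache, i.e. roughly once per major
 * fault. The statics below make global_faults a fault clock shared by
 * all contenders.
 */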
void grab_swap_token(struct mm_struct *mm)
{
        int current_interval;
        unsigned int old_prio = mm->token_priority;
        static unsigned int global_faults;
        static unsigned int last_aging;

        global_faults++;

        current_interval = global_faults - mm->faultstamp;

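        /*
         * Token assignment is best-effort: if the lock is contended,
         * another fault is already re-evaluating ownership, so give up
         * rather than spin in the fault path.
         */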
        if (!spin_trylock(&swap_token_lock))
                return;

        /* First come first served */
        if (!swap_token_mm)
                goto replace_token;

        /*
         * Usually we don't need priority aging, because faults at long
         * intervals make the priority decrease quickly. But there is
         * one exception: if the token owner task is sleeping, it never
         * takes long-interval faults, so we need an explicit aging
         * mechanism instead. The requirements for priority aging are:
         *  1) The aging interval must be reasonably long. Too short an
         *     interval makes the token change hands too quickly and
         *     hurts performance.
         *  2) The token owner's priority must be aged even while the
         *     task is asleep.
         */
        if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
                swap_token_mm->token_priority /= 2;
                last_aging = global_faults;
        }

        if (mm == swap_token_mm) {
                mm->token_priority += 2;
                goto update_priority;
        }

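        /*
         * Faulting at shorter intervals than last time means this task
         * is thrashing harder, so strengthen its claim on the token;
         * longer intervals weaken it.
         */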
        if (current_interval < mm->last_interval)
                mm->token_priority++;
        else {
                if (likely(mm->token_priority > 0))
                        mm->token_priority--;
        }

        /* Check if we deserve the token */
        if (mm->token_priority > swap_token_mm->token_priority)
                goto replace_token;

update_priority:
        trace_update_swap_token_priority(mm, old_prio, swap_token_mm);

out:
        mm->faultstamp = global_faults;
        mm->last_interval = current_interval;
        spin_unlock(&swap_token_lock);
        return;

replace_token:
        mm->token_priority += 2;
        trace_replace_swap_token(swap_token_mm, mm);
        swap_token_mm = mm;
        swap_token_memcg = swap_token_memcg_from_mm(mm);
        last_aging = global_faults;
        goto out;
}

/* Called on process exit. */
void __put_swap_token(struct mm_struct *mm)
{
        spin_lock(&swap_token_lock);
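        /*
         * Recheck under the lock: callers test has_swap_token()
         * without it, and the token may have moved since then.
         */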
        if (likely(mm == swap_token_mm)) {
                trace_put_swap_token(swap_token_mm);
                swap_token_mm = NULL;
                swap_token_memcg = NULL;
        }
        spin_unlock(&swap_token_lock);
}

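/*
 * NULL acts as a wildcard: global reclaim (no memcg) matches any token
 * holder, and a token holder without a known memcg matches any
 * reclaimer.
 */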
static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
{
        if (!a)
                return true;
        if (!b)
                return true;
        if (a == b)
                return true;
        return false;
}

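/*
 * Called by reclaim when it is struggling to make progress, so that
 * even the token holder's pages become eligible for eviction. The
 * unlocked match_memcg() test is a racy fast path; the result is
 * re-checked under the lock before the token is dropped.
 */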
void disable_swap_token(struct mem_cgroup *memcg)
{
        /* memcg reclaim doesn't disable an unrelated mm's token. */
        if (match_memcg(memcg, swap_token_memcg)) {
                spin_lock(&swap_token_lock);
                if (match_memcg(memcg, swap_token_memcg)) {
                        trace_disable_swap_token(swap_token_mm);
                        swap_token_mm = NULL;
                        swap_token_memcg = NULL;
                }
                spin_unlock(&swap_token_lock);
        }
}
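
/*
 * For context, a sketch of the main call site (not part of this file;
 * adapted from do_swap_page() in mm/memory.c of the same kernel era --
 * details vary by version). The token is contended for before the
 * synchronous swap read-in:
 *
 *      page = lookup_swap_cache(entry);
 *      if (!page) {
 *              grab_swap_token(mm);    // Contend for token before read-in
 *              page = swapin_readahead(entry,
 *                                      GFP_HIGHUSER_MOVABLE, vma, address);
 *              ...
 *      }
 */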