linux/kernel/sched/stop_task.c
#include "sched.h"

/*
 * stop-task scheduling class.
 *
 * The stop task is the highest-priority task in the system: it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */
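
/*
 * Note: rq->stop points at the per-CPU stopper kthread created by
 * kernel/stop_machine.c; it is installed via sched_set_stop_task().
 */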

#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct task_struct *p, int sd_flag, int flags)
{
        return task_cpu(p); /* stop tasks never migrate */
}
#endif /* CONFIG_SMP */

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
        /* we're never preempted */
}

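/*
 * The stop task is not kept on any per-class runqueue; it is reached
 * directly through rq->stop.  Pick it whenever it is queued, and restart
 * its exec clock so put_prev_task_stop() accounts its runtime.
 */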
static struct task_struct *pick_next_task_stop(struct rq *rq)
{
        struct task_struct *stop = rq->stop;

        if (stop && stop->on_rq) {
                stop->se.exec_start = rq->clock_task;
                return stop;
        }

        return NULL;
}

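/*
 * Enqueue/dequeue only maintain rq->nr_running; the task itself is always
 * found through rq->stop rather than a per-class runqueue list.
 */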
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
        inc_nr_running(rq);
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
        dec_nr_running(rq);
}

static void yield_task_stop(struct rq *rq)
{
        BUG(); /* the stop task should never yield, it's pointless. */
}

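/*
 * Account the CPU time the outgoing stop task consumed since its
 * exec_start stamp was last taken.
 */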
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        delta_exec = rq->clock_task - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.statistics.exec_max,
                        max(curr->se.statistics.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = rq->clock_task;
        cpuacct_charge(curr, delta_exec);
}

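/* Nothing to do on a scheduler tick; the stop task has no timeslice. */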
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

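/*
 * Called when the running task is switched into this class; restart the
 * exec clock just as pick_next_task_stop() would.
 */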
static void set_curr_task_stop(struct rq *rq)
{
        struct task_struct *stop = rq->stop;

        stop->se.exec_start = rq->clock_task;
}

static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
        BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
        BUG(); /* how!? what priority? */
}

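/* stop tasks are not round-robin scheduled, so report no timeslice. */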
static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
        return 0;
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
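/* ->next chains this class ahead of RT: stop -> rt -> fair -> idle. */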
const struct sched_class stop_sched_class = {
        .next                   = &rt_sched_class,

        .enqueue_task           = enqueue_task_stop,
        .dequeue_task           = dequeue_task_stop,
        .yield_task             = yield_task_stop,

        .check_preempt_curr     = check_preempt_curr_stop,

        .pick_next_task         = pick_next_task_stop,
        .put_prev_task          = put_prev_task_stop,

#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_stop,
#endif

        .set_curr_task          = set_curr_task_stop,
        .task_tick              = task_tick_stop,

        .get_rr_interval        = get_rr_interval_stop,

        .prio_changed           = prio_changed_stop,
        .switched_to            = switched_to_stop,
};