linux/kernel/sched/features.h
/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
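
/*
 * Illustrative sketch, not part of this header: the consumer of this
 * flag is place_entity() in kernel/sched/fair.c, which halves the
 * sleeper credit when the feature is enabled, roughly:
 *
 *	unsigned long thresh = sysctl_sched_latency;
 *
 *	if (sched_feat(GENTLE_FAIR_SLEEPERS))
 *		thresh >>= 1;
 *	vruntime -= thresh;
 */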

/*
 * Place new tasks ahead so that they do not starve already running
 * tasks
 */
SCHED_FEAT(START_DEBIT, true)
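
/*
 * Illustrative sketch, not part of this header: place_entity() in
 * kernel/sched/fair.c applies the debit when a task is first enqueued,
 * roughly:
 *
 *	if (initial && sched_feat(START_DEBIT))
 *		vruntime += sched_vslice(cfs_rq, se);
 */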

/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it is likely going to consume data we
 * touched; this increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, false)

/*
 * Prefer to schedule the task that ran last (when we did
 * wake-preempt), as it will likely touch the same data; this
 * increases cache locality.
 */
SCHED_FEAT(LAST_BUDDY, true)
  27
  28/*
  29 * Consider buddies to be cache hot, decreases the likelyness of a
  30 * cache buddy being migrated away, increases cache locality.
  31 */
  32SCHED_FEAT(CACHE_HOT_BUDDY, true)
  33
  34/*
  35 * Use arch dependent cpu power functions
  36 */
  37SCHED_FEAT(ARCH_POWER, true)

SCHED_FEAT(HRTICK, false)
SCHED_FEAT(DOUBLE_TICK, false)
SCHED_FEAT(LB_BIAS, true)

/*
 * Spin-wait on mutex acquisition when the mutex owner is running on
 * another cpu -- assumes that when the owner is running, it will soon
 * release the lock. Decreases scheduling overhead.
 */
SCHED_FEAT(OWNER_SPIN, true)

/*
 * Decrement CPU power based on time not spent running tasks
 */
SCHED_FEAT(NONTASK_POWER, true)

/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, true)
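
/*
 * Illustrative sketch, not part of this header: ttwu_queue() in
 * kernel/sched/core.c consults this flag to decide whether to hand the
 * wakeup to the remote CPU via IPI instead of taking its rq->lock,
 * roughly:
 *
 *	if (sched_feat(TTWU_QUEUE) &&
 *	    !cpus_share_cache(smp_processor_id(), cpu)) {
 *		ttwu_queue_remote(p, cpu);
 *		return;
 *	}
 */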

SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)

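/*
 * Illustrative sketch, not part of this header: the scheduler core
 * (kernel/sched/sched.h and kernel/sched/core.c) includes this file
 * with different definitions of SCHED_FEAT(), e.g. once to build an
 * enum of feature bits and once to build the default feature mask, and
 * tests the bits via the sched_feat() macro:
 *
 *	#define SCHED_FEAT(name, enabled)	\
 *		__SCHED_FEAT_##name ,
 *
 *	enum {
 *	#include "features.h"
 *		__SCHED_FEAT_NR,
 *	};
 *
 * With CONFIG_SCHED_DEBUG, individual features can be toggled at run
 * time by writing "NAME" or "NO_NAME" to
 * /sys/kernel/debug/sched_features.
 */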