linux/kernel/workqueue.c
   1/*
   2 * kernel/workqueue.c - generic async execution with shared worker pool
   3 *
   4 * Copyright (C) 2002           Ingo Molnar
   5 *
   6 *   Derived from the taskqueue/keventd code by:
   7 *     David Woodhouse <dwmw2@infradead.org>
   8 *     Andrew Morton
   9 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
  10 *     Theodore Ts'o <tytso@mit.edu>
  11 *
  12 * Made to use alloc_percpu by Christoph Lameter.
  13 *
  14 * Copyright (C) 2010           SUSE Linux Products GmbH
  15 * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
  16 *
  17 * This is the generic async execution mechanism.  Work items are
  18 * executed in process context.  The worker pool is shared and
  19 * automatically managed.  There is one worker pool for each CPU and
  20 * one extra for works which are better served by workers which are
  21 * not bound to any specific CPU.
  22 *
  23 * Please read Documentation/workqueue.txt for details.
  24 */
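
/*
 * Illustrative sketch, not part of the original file: how a typical user
 * of this mechanism defines and queues a work item.  All example_* names
 * and the example_data structure are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct example_data {
        struct work_struct      work;
        int                     value;
};

static void example_work_fn(struct work_struct *work)
{
        struct example_data *d = container_of(work, struct example_data, work);

        /* runs later, in process context, on one of the shared pool workers */
        pr_info("example work ran, value=%d\n", d->value);
        kfree(d);
}

static int example_submit(int value)
{
        struct example_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

        if (!d)
                return -ENOMEM;
        d->value = value;
        INIT_WORK(&d->work, example_work_fn);
        schedule_work(&d->work);        /* queue on the default system_wq */
        return 0;
}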
  25
  26#include <linux/export.h>
  27#include <linux/kernel.h>
  28#include <linux/sched.h>
  29#include <linux/init.h>
  30#include <linux/signal.h>
  31#include <linux/completion.h>
  32#include <linux/workqueue.h>
  33#include <linux/slab.h>
  34#include <linux/cpu.h>
  35#include <linux/notifier.h>
  36#include <linux/kthread.h>
  37#include <linux/hardirq.h>
  38#include <linux/mempolicy.h>
  39#include <linux/freezer.h>
  40#include <linux/kallsyms.h>
  41#include <linux/debug_locks.h>
  42#include <linux/lockdep.h>
  43#include <linux/idr.h>
  44
  45#include "workqueue_sched.h"
  46
  47enum {
  48        /*
  49         * global_cwq flags
  50         *
  51         * A bound gcwq is either associated or disassociated with its CPU.
  52         * While associated (!DISASSOCIATED), all workers are bound to the
  53         * CPU and none has %WORKER_UNBOUND set and concurrency management
  54         * is in effect.
  55         *
  56         * While DISASSOCIATED, the cpu may be offline and all workers have
  57         * %WORKER_UNBOUND set and concurrency management disabled, and may
  58         * be executing on any CPU.  The gcwq behaves as an unbound one.
  59         *
  60         * Note that DISASSOCIATED can be flipped only while holding
  61         * managership of all pools on the gcwq to avoid changing binding
  62         * state while create_worker() is in progress.
  63         */
  64        GCWQ_DISASSOCIATED      = 1 << 0,       /* cpu can't serve workers */
  65        GCWQ_FREEZING           = 1 << 1,       /* freeze in progress */
  66
  67        /* pool flags */
  68        POOL_MANAGE_WORKERS     = 1 << 0,       /* need to manage workers */
  69        POOL_MANAGING_WORKERS   = 1 << 1,       /* managing workers */
  70
  71        /* worker flags */
  72        WORKER_STARTED          = 1 << 0,       /* started */
  73        WORKER_DIE              = 1 << 1,       /* die die die */
  74        WORKER_IDLE             = 1 << 2,       /* is idle */
  75        WORKER_PREP             = 1 << 3,       /* preparing to run works */
  76        WORKER_REBIND           = 1 << 5,       /* mom is home, come back */
  77        WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
  78        WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
  79
  80        WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_REBIND | WORKER_UNBOUND |
  81                                  WORKER_CPU_INTENSIVE,
  82
  83        NR_WORKER_POOLS         = 2,            /* # worker pools per gcwq */
  84
  85        BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
  86        BUSY_WORKER_HASH_SIZE   = 1 << BUSY_WORKER_HASH_ORDER,
  87        BUSY_WORKER_HASH_MASK   = BUSY_WORKER_HASH_SIZE - 1,
  88
  89        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
  90        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
  91
  92        MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
  93                                                /* call for help after 10ms
  94                                                   (min two ticks) */
  95        MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
  96        CREATE_COOLDOWN         = HZ,           /* time to breathe after fail */
  97
  98        /*
  99         * Rescue workers are used only in emergencies and shared by
 100         * all cpus.  Give them nice level -20.
 101         */
 102        RESCUER_NICE_LEVEL      = -20,
 103        HIGHPRI_NICE_LEVEL      = -20,
 104};
 105
 106/*
 107 * Structure fields follow one of the following exclusion rules.
 108 *
 109 * I: Modifiable by initialization/destruction paths and read-only for
 110 *    everyone else.
 111 *
 112 * P: Preemption protected.  Disabling preemption is enough and should
 113 *    only be modified and accessed from the local cpu.
 114 *
 115 * L: gcwq->lock protected.  Access with gcwq->lock held.
 116 *
 117 * X: During normal operation, modification requires gcwq->lock and
 118 *    should be done only from local cpu.  Either disabling preemption
 119 *    on local cpu or grabbing gcwq->lock is enough for read access.
 120 *    If GCWQ_DISASSOCIATED is set, it's identical to L.
 121 *
 122 * F: wq->flush_mutex protected.
 123 *
 124 * W: workqueue_lock protected.
 125 */
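
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * structure annotated with the exclusion rules above, showing how the
 * single-letter tags are meant to be read.
 */
struct example_annotated {
        int                     id;             /* I: fixed after creation */
        unsigned int            flags;          /* X: gcwq->lock, or local cpu w/ preemption off */
        struct list_head        pending;        /* L: protected by gcwq->lock */
        struct list_head        all_node;       /* W: hangs off a workqueue_lock'd list */
};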
 126
 127struct global_cwq;
 128struct worker_pool;
 129struct idle_rebind;
 130
 131/*
 132 * The poor guys doing the actual heavy lifting.  All on-duty workers
 133 * are either serving the manager role, on idle list or on busy hash.
 134 */
 135struct worker {
 136        /* on idle list while idle, on busy hash table while busy */
 137        union {
 138                struct list_head        entry;  /* L: while idle */
 139                struct hlist_node       hentry; /* L: while busy */
 140        };
 141
 142        struct work_struct      *current_work;  /* L: work being processed */
 143        struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
 144        struct list_head        scheduled;      /* L: scheduled works */
 145        struct task_struct      *task;          /* I: worker task */
 146        struct worker_pool      *pool;          /* I: the associated pool */
 147        /* 64 bytes boundary on 64bit, 32 on 32bit */
 148        unsigned long           last_active;    /* L: last active timestamp */
 149        unsigned int            flags;          /* X: flags */
 150        int                     id;             /* I: worker id */
 151
 152        /* for rebinding worker to CPU */
 153        struct idle_rebind      *idle_rebind;   /* L: for idle worker */
 154        struct work_struct      rebind_work;    /* L: for busy worker */
 155};
 156
 157struct worker_pool {
 158        struct global_cwq       *gcwq;          /* I: the owning gcwq */
 159        unsigned int            flags;          /* X: flags */
 160
 161        struct list_head        worklist;       /* L: list of pending works */
 162        int                     nr_workers;     /* L: total number of workers */
 163        int                     nr_idle;        /* L: currently idle ones */
 164
 165        struct list_head        idle_list;      /* X: list of idle workers */
 166        struct timer_list       idle_timer;     /* L: worker idle timeout */
 167        struct timer_list       mayday_timer;   /* L: SOS timer for workers */
 168
 169        struct mutex            manager_mutex;  /* mutex manager should hold */
 170        struct ida              worker_ida;     /* L: for worker IDs */
 171};
 172
 173/*
 174 * Global per-cpu workqueue.  There's one and only one for each cpu
 175 * and all works are queued and processed here regardless of their
 176 * target workqueues.
 177 */
 178struct global_cwq {
 179        spinlock_t              lock;           /* the gcwq lock */
 180        unsigned int            cpu;            /* I: the associated cpu */
 181        unsigned int            flags;          /* L: GCWQ_* flags */
 182
 183        /* workers are chained either in busy_hash or pool idle_list */
 184        struct hlist_head       busy_hash[BUSY_WORKER_HASH_SIZE];
 185                                                /* L: hash of busy workers */
 186
 187        struct worker_pool      pools[2];       /* normal and highpri pools */
 188
 189        wait_queue_head_t       rebind_hold;    /* rebind hold wait */
 190} ____cacheline_aligned_in_smp;
 191
 192/*
 193 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 194 * work_struct->data are used for flags and thus cwqs need to be
 195 * aligned on a (1 << WORK_STRUCT_FLAG_BITS) byte boundary.
 196 */
 197struct cpu_workqueue_struct {
 198        struct worker_pool      *pool;          /* I: the associated pool */
 199        struct workqueue_struct *wq;            /* I: the owning workqueue */
 200        int                     work_color;     /* L: current color */
 201        int                     flush_color;    /* L: flushing color */
 202        int                     nr_in_flight[WORK_NR_COLORS];
 203                                                /* L: nr of in_flight works */
 204        int                     nr_active;      /* L: nr of active works */
 205        int                     max_active;     /* L: max active works */
 206        struct list_head        delayed_works;  /* L: delayed works */
 207};
 208
 209/*
 210 * Structure used to wait for workqueue flush.
 211 */
 212struct wq_flusher {
 213        struct list_head        list;           /* F: list of flushers */
 214        int                     flush_color;    /* F: flush color waiting for */
 215        struct completion       done;           /* flush completion */
 216};
 217
 218/*
 219 * All cpumasks are assumed to be always set on UP and thus can't be
 220 * used to determine whether there's something to be done.
 221 */
 222#ifdef CONFIG_SMP
 223typedef cpumask_var_t mayday_mask_t;
 224#define mayday_test_and_set_cpu(cpu, mask)      \
 225        cpumask_test_and_set_cpu((cpu), (mask))
 226#define mayday_clear_cpu(cpu, mask)             cpumask_clear_cpu((cpu), (mask))
 227#define for_each_mayday_cpu(cpu, mask)          for_each_cpu((cpu), (mask))
 228#define alloc_mayday_mask(maskp, gfp)           zalloc_cpumask_var((maskp), (gfp))
 229#define free_mayday_mask(mask)                  free_cpumask_var((mask))
 230#else
 231typedef unsigned long mayday_mask_t;
 232#define mayday_test_and_set_cpu(cpu, mask)      test_and_set_bit(0, &(mask))
 233#define mayday_clear_cpu(cpu, mask)             clear_bit(0, &(mask))
 234#define for_each_mayday_cpu(cpu, mask)          if ((cpu) = 0, (mask))  /* UP: '=' is intentional - iterate once for cpu 0 */
 235#define alloc_mayday_mask(maskp, gfp)           true
 236#define free_mayday_mask(mask)                  do { } while (0)
 237#endif
 238
 239/*
 240 * The externally visible workqueue abstraction is an array of
 241 * per-CPU workqueues:
 242 */
 243struct workqueue_struct {
 244        unsigned int            flags;          /* W: WQ_* flags */
 245        union {
 246                struct cpu_workqueue_struct __percpu    *pcpu;
 247                struct cpu_workqueue_struct             *single;
 248                unsigned long                           v;
 249        } cpu_wq;                               /* I: cwq's */
 250        struct list_head        list;           /* W: list of all workqueues */
 251
 252        struct mutex            flush_mutex;    /* protects wq flushing */
 253        int                     work_color;     /* F: current work color */
 254        int                     flush_color;    /* F: current flush color */
 255        atomic_t                nr_cwqs_to_flush; /* flush in progress */
 256        struct wq_flusher       *first_flusher; /* F: first flusher */
 257        struct list_head        flusher_queue;  /* F: flush waiters */
 258        struct list_head        flusher_overflow; /* F: flush overflow list */
 259
 260        mayday_mask_t           mayday_mask;    /* cpus requesting rescue */
 261        struct worker           *rescuer;       /* I: rescue worker */
 262
 263        int                     nr_drainers;    /* W: drain in progress */
 264        int                     saved_max_active; /* W: saved cwq max_active */
 265#ifdef CONFIG_LOCKDEP
 266        struct lockdep_map      lockdep_map;
 267#endif
 268        char                    name[];         /* I: workqueue name */
 269};
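
/*
 * Illustrative sketch, not part of the original file: how the mayday_mask
 * wrappers above are meant to be used.  example_send_mayday() is
 * hypothetical but mirrors the rescuer kick performed later in this file;
 * the same call site compiles on both SMP and UP because the UP variants
 * collapse the mask into a single bit.
 */
static void example_send_mayday(struct workqueue_struct *wq, unsigned int cpu)
{
        if (!(wq->flags & WQ_RESCUER))
                return;

        /* mark @cpu as needing rescue; wake the rescuer only on a 0 -> 1 transition */
        if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
                wake_up_process(wq->rescuer->task);
}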
 270
 271struct workqueue_struct *system_wq __read_mostly;
 272struct workqueue_struct *system_long_wq __read_mostly;
 273struct workqueue_struct *system_nrt_wq __read_mostly;
 274struct workqueue_struct *system_unbound_wq __read_mostly;
 275struct workqueue_struct *system_freezable_wq __read_mostly;
 276struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
 277EXPORT_SYMBOL_GPL(system_wq);
 278EXPORT_SYMBOL_GPL(system_long_wq);
 279EXPORT_SYMBOL_GPL(system_nrt_wq);
 280EXPORT_SYMBOL_GPL(system_unbound_wq);
 281EXPORT_SYMBOL_GPL(system_freezable_wq);
 282EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
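
/*
 * Illustrative sketch, not part of the original file: picking one of the
 * system-wide workqueues exported above.  example_pick_system_wq() and
 * its parameters are hypothetical.
 */
static void example_pick_system_wq(struct work_struct *short_item,
                                   struct work_struct *slow_item,
                                   struct work_struct *any_cpu_item)
{
        queue_work(system_wq, short_item);      /* default; short, quick items */
        queue_work(system_long_wq, slow_item);  /* items that may run or sleep for long */
        queue_work(system_unbound_wq, any_cpu_item); /* no CPU locality, served by the unbound gcwq */
}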
 283
 284#define CREATE_TRACE_POINTS
 285#include <trace/events/workqueue.h>
 286
 287#define for_each_worker_pool(pool, gcwq)                                \
 288        for ((pool) = &(gcwq)->pools[0];                                \
 289             (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
 290
 291#define for_each_busy_worker(worker, i, pos, gcwq)                      \
 292        for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)                     \
 293                hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
 294
 295static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
 296                                  unsigned int sw)
 297{
 298        if (cpu < nr_cpu_ids) {
 299                if (sw & 1) {
 300                        cpu = cpumask_next(cpu, mask);
 301                        if (cpu < nr_cpu_ids)
 302                                return cpu;
 303                }
 304                if (sw & 2)
 305                        return WORK_CPU_UNBOUND;
 306        }
 307        return WORK_CPU_NONE;
 308}
 309
 310static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 311                                struct workqueue_struct *wq)
 312{
 313        return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
 314}
 315
 316/*
 317 * CPU iterators
 318 *
 319 * An extra gcwq is defined for an invalid cpu number
 320 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
 321 * specific CPU.  The following iterators are similar to
 322 * for_each_*_cpu() iterators but also considers the unbound gcwq.
 323 *
 324 * for_each_gcwq_cpu()          : possible CPUs + WORK_CPU_UNBOUND
 325 * for_each_online_gcwq_cpu()   : online CPUs + WORK_CPU_UNBOUND
 326 * for_each_cwq_cpu()           : possible CPUs for bound workqueues,
 327 *                                WORK_CPU_UNBOUND for unbound workqueues
 328 */
 329#define for_each_gcwq_cpu(cpu)                                          \
 330        for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);         \
 331             (cpu) < WORK_CPU_NONE;                                     \
 332             (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
 333
 334#define for_each_online_gcwq_cpu(cpu)                                   \
 335        for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);           \
 336             (cpu) < WORK_CPU_NONE;                                     \
 337             (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
 338
 339#define for_each_cwq_cpu(cpu, wq)                                       \
 340        for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));        \
 341             (cpu) < WORK_CPU_NONE;                                     \
 342             (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
 343
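/*
 * Illustrative sketch, not part of the original file: walking every gcwq,
 * including the unbound one, with the iterators above.  The function is
 * hypothetical but mirrors how is_chained_work() below scans all gcwqs;
 * get_gcwq() is defined further down in this file.
 */
static int example_count_busy_workers(void)
{
        unsigned int cpu;
        int busy = 0;

        for_each_gcwq_cpu(cpu) {
                struct global_cwq *gcwq = get_gcwq(cpu);
                struct worker_pool *pool;
                unsigned long flags;

                spin_lock_irqsave(&gcwq->lock, flags);
                for_each_worker_pool(pool, gcwq)
                        busy += pool->nr_workers - pool->nr_idle;
                spin_unlock_irqrestore(&gcwq->lock, flags);
        }
        return busy;
}
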
 344#ifdef CONFIG_DEBUG_OBJECTS_WORK
 345
 346static struct debug_obj_descr work_debug_descr;
 347
 348static void *work_debug_hint(void *addr)
 349{
 350        return ((struct work_struct *) addr)->func;
 351}
 352
 353/*
 354 * fixup_init is called when:
 355 * - an active object is initialized
 356 */
 357static int work_fixup_init(void *addr, enum debug_obj_state state)
 358{
 359        struct work_struct *work = addr;
 360
 361        switch (state) {
 362        case ODEBUG_STATE_ACTIVE:
 363                cancel_work_sync(work);
 364                debug_object_init(work, &work_debug_descr);
 365                return 1;
 366        default:
 367                return 0;
 368        }
 369}
 370
 371/*
 372 * fixup_activate is called when:
 373 * - an active object is activated
 374 * - an unknown object is activated (might be a statically initialized object)
 375 */
 376static int work_fixup_activate(void *addr, enum debug_obj_state state)
 377{
 378        struct work_struct *work = addr;
 379
 380        switch (state) {
 381
 382        case ODEBUG_STATE_NOTAVAILABLE:
 383                /*
 384                 * This is not really a fixup. The work struct was
 385                 * statically initialized. We just make sure that it
 386                 * is tracked in the object tracker.
 387                 */
 388                if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
 389                        debug_object_init(work, &work_debug_descr);
 390                        debug_object_activate(work, &work_debug_descr);
 391                        return 0;
 392                }
 393                WARN_ON_ONCE(1);
 394                return 0;
 395
 396        case ODEBUG_STATE_ACTIVE:
 397                WARN_ON(1);
 398
 399        default:
 400                return 0;
 401        }
 402}
 403
 404/*
 405 * fixup_free is called when:
 406 * - an active object is freed
 407 */
 408static int work_fixup_free(void *addr, enum debug_obj_state state)
 409{
 410        struct work_struct *work = addr;
 411
 412        switch (state) {
 413        case ODEBUG_STATE_ACTIVE:
 414                cancel_work_sync(work);
 415                debug_object_free(work, &work_debug_descr);
 416                return 1;
 417        default:
 418                return 0;
 419        }
 420}
 421
 422static struct debug_obj_descr work_debug_descr = {
 423        .name           = "work_struct",
 424        .debug_hint     = work_debug_hint,
 425        .fixup_init     = work_fixup_init,
 426        .fixup_activate = work_fixup_activate,
 427        .fixup_free     = work_fixup_free,
 428};
 429
 430static inline void debug_work_activate(struct work_struct *work)
 431{
 432        debug_object_activate(work, &work_debug_descr);
 433}
 434
 435static inline void debug_work_deactivate(struct work_struct *work)
 436{
 437        debug_object_deactivate(work, &work_debug_descr);
 438}
 439
 440void __init_work(struct work_struct *work, int onstack)
 441{
 442        if (onstack)
 443                debug_object_init_on_stack(work, &work_debug_descr);
 444        else
 445                debug_object_init(work, &work_debug_descr);
 446}
 447EXPORT_SYMBOL_GPL(__init_work);
 448
 449void destroy_work_on_stack(struct work_struct *work)
 450{
 451        debug_object_free(work, &work_debug_descr);
 452}
 453EXPORT_SYMBOL_GPL(destroy_work_on_stack);
 454
 455#else
 456static inline void debug_work_activate(struct work_struct *work) { }
 457static inline void debug_work_deactivate(struct work_struct *work) { }
 458#endif
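
/*
 * Illustrative sketch, not part of the original file: an on-stack work
 * item must be set up with INIT_WORK_ONSTACK() so __init_work() above
 * registers it correctly with debugobjects, and torn down with
 * destroy_work_on_stack() before the stack frame disappears.  The
 * example_* names are hypothetical.
 */
static void example_onstack_fn(struct work_struct *work)
{
        pr_info("on-stack work executed\n");
}

static void example_onstack_usage(void)
{
        struct work_struct work;

        INIT_WORK_ONSTACK(&work, example_onstack_fn);
        schedule_work(&work);
        flush_work(&work);              /* must finish before the frame goes away */
        destroy_work_on_stack(&work);
}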
 459
 460/* Serializes the accesses to the list of workqueues. */
 461static DEFINE_SPINLOCK(workqueue_lock);
 462static LIST_HEAD(workqueues);
 463static bool workqueue_freezing;         /* W: have wqs started freezing? */
 464
 465/*
 466 * The almighty global cpu workqueues.  nr_running is the only field
 467 * which is expected to be used frequently by other cpus via
 468 * try_to_wake_up().  Put it in a separate cacheline.
 469 */
 470static DEFINE_PER_CPU(struct global_cwq, global_cwq);
 471static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);
 472
 473/*
 474 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 475 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 476 * workers have WORKER_UNBOUND set.
 477 */
 478static struct global_cwq unbound_global_cwq;
 479static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
 480        [0 ... NR_WORKER_POOLS - 1]     = ATOMIC_INIT(0),       /* always 0 */
 481};
 482
 483static int worker_thread(void *__worker);
 484
 485static int worker_pool_pri(struct worker_pool *pool)
 486{
 487        return pool - pool->gcwq->pools;
 488}
 489
 490static struct global_cwq *get_gcwq(unsigned int cpu)
 491{
 492        if (cpu != WORK_CPU_UNBOUND)
 493                return &per_cpu(global_cwq, cpu);
 494        else
 495                return &unbound_global_cwq;
 496}
 497
 498static atomic_t *get_pool_nr_running(struct worker_pool *pool)
 499{
 500        int cpu = pool->gcwq->cpu;
 501        int idx = worker_pool_pri(pool);
 502
 503        if (cpu != WORK_CPU_UNBOUND)
 504                return &per_cpu(pool_nr_running, cpu)[idx];
 505        else
 506                return &unbound_pool_nr_running[idx];
 507}
 508
 509static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
 510                                            struct workqueue_struct *wq)
 511{
 512        if (!(wq->flags & WQ_UNBOUND)) {
 513                if (likely(cpu < nr_cpu_ids))
 514                        return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
 515        } else if (likely(cpu == WORK_CPU_UNBOUND))
 516                return wq->cpu_wq.single;
 517        return NULL;
 518}
 519
 520static unsigned int work_color_to_flags(int color)
 521{
 522        return color << WORK_STRUCT_COLOR_SHIFT;
 523}
 524
 525static int get_work_color(struct work_struct *work)
 526{
 527        return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
 528                ((1 << WORK_STRUCT_COLOR_BITS) - 1);
 529}
 530
 531static int work_next_color(int color)
 532{
 533        return (color + 1) % WORK_NR_COLORS;
 534}
 535
 536/*
 537 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
 538 * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
 539 * cleared and the work data contains the cpu number it was last on.
 540 *
 541 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 542 * cwq, cpu or clear work->data.  These functions should only be
 543 * called while the work is owned - ie. while the PENDING bit is set.
 544 *
 545 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 546 * corresponding to a work.  gcwq is available once the work has been
 547 * queued anywhere after initialization.  cwq is available only from
 548 * queueing until execution starts.
 549 */
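
/*
 * Illustrative sketch, not part of the original file: the tagged-pointer
 * encoding described above, reduced to its essentials.  The EXAMPLE_*
 * names are made up; in the real code the pointer is a cwq aligned so
 * that its low WORK_STRUCT_FLAG_BITS bits are zero.
 */
#define EXAMPLE_FLAG_BITS       8UL
#define EXAMPLE_FLAG_MASK       ((1UL << EXAMPLE_FLAG_BITS) - 1)

static unsigned long example_pack(void *aligned_ptr, unsigned long flags)
{
        /* @aligned_ptr must have its low EXAMPLE_FLAG_BITS bits clear */
        return (unsigned long)aligned_ptr | (flags & EXAMPLE_FLAG_MASK);
}

static void *example_unpack_ptr(unsigned long data)
{
        return (void *)(data & ~EXAMPLE_FLAG_MASK);
}

static unsigned long example_unpack_flags(unsigned long data)
{
        return data & EXAMPLE_FLAG_MASK;
}
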
 550static inline void set_work_data(struct work_struct *work, unsigned long data,
 551                                 unsigned long flags)
 552{
 553        BUG_ON(!work_pending(work));
 554        atomic_long_set(&work->data, data | flags | work_static(work));
 555}
 556
 557static void set_work_cwq(struct work_struct *work,
 558                         struct cpu_workqueue_struct *cwq,
 559                         unsigned long extra_flags)
 560{
 561        set_work_data(work, (unsigned long)cwq,
 562                      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
 563}
 564
 565static void set_work_cpu(struct work_struct *work, unsigned int cpu)
 566{
 567        set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
 568}
 569
 570static void clear_work_data(struct work_struct *work)
 571{
 572        set_work_data(work, WORK_STRUCT_NO_CPU, 0);
 573}
 574
 575static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
 576{
 577        unsigned long data = atomic_long_read(&work->data);
 578
 579        if (data & WORK_STRUCT_CWQ)
 580                return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 581        else
 582                return NULL;
 583}
 584
 585static struct global_cwq *get_work_gcwq(struct work_struct *work)
 586{
 587        unsigned long data = atomic_long_read(&work->data);
 588        unsigned int cpu;
 589
 590        if (data & WORK_STRUCT_CWQ)
 591                return ((struct cpu_workqueue_struct *)
 592                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
 593
 594        cpu = data >> WORK_STRUCT_FLAG_BITS;
 595        if (cpu == WORK_CPU_NONE)
 596                return NULL;
 597
 598        BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
 599        return get_gcwq(cpu);
 600}
 601
 602/*
 603 * Policy functions.  These define the policies on how the global worker
 604 * pools are managed.  Unless noted otherwise, these functions assume that
 605 * they're being called with gcwq->lock held.
 606 */
 607
 608static bool __need_more_worker(struct worker_pool *pool)
 609{
 610        return !atomic_read(get_pool_nr_running(pool));
 611}
 612
 613/*
 614 * Need to wake up a worker?  Called from anything but currently
 615 * running workers.
 616 *
 617 * Note that, because unbound workers never contribute to nr_running, this
 618 * function will always return %true for unbound gcwq as long as the
 619 * worklist isn't empty.
 620 */
 621static bool need_more_worker(struct worker_pool *pool)
 622{
 623        return !list_empty(&pool->worklist) && __need_more_worker(pool);
 624}
 625
 626/* Can I start working?  Called from busy but !running workers. */
 627static bool may_start_working(struct worker_pool *pool)
 628{
 629        return pool->nr_idle;
 630}
 631
 632/* Do I need to keep working?  Called from currently running workers. */
 633static bool keep_working(struct worker_pool *pool)
 634{
 635        atomic_t *nr_running = get_pool_nr_running(pool);
 636
 637        return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1;
 638}
 639
 640/* Do we need a new worker?  Called from manager. */
 641static bool need_to_create_worker(struct worker_pool *pool)
 642{
 643        return need_more_worker(pool) && !may_start_working(pool);
 644}
 645
 646/* Do I need to be the manager? */
 647static bool need_to_manage_workers(struct worker_pool *pool)
 648{
 649        return need_to_create_worker(pool) ||
 650                (pool->flags & POOL_MANAGE_WORKERS);
 651}
 652
 653/* Do we have too many workers and should some go away? */
 654static bool too_many_workers(struct worker_pool *pool)
 655{
 656        bool managing = pool->flags & POOL_MANAGING_WORKERS;
 657        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 658        int nr_busy = pool->nr_workers - nr_idle;
 659
 660        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 661}
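
/*
 * Worked example for too_many_workers() above; the numbers are
 * illustrative and the helper is hypothetical.  The pool tolerates two
 * idle workers plus roughly a quarter of the busy count before arming
 * the idle timer.
 */
static bool example_too_many(int nr_idle, int nr_busy)
{
        /*
         * e.g. nr_busy == 8: nr_idle == 3 gives (3 - 2) * 4 = 4  < 8, keep;
         *      nr_idle == 4 gives (4 - 2) * 4 = 8 >= 8, start the timer.
         */
        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}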
 662
 663/*
 664 * Wake up functions.
 665 */
 666
 667/* Return the first worker.  Safe with preemption disabled */
 668static struct worker *first_worker(struct worker_pool *pool)
 669{
 670        if (unlikely(list_empty(&pool->idle_list)))
 671                return NULL;
 672
 673        return list_first_entry(&pool->idle_list, struct worker, entry);
 674}
 675
 676/**
 677 * wake_up_worker - wake up an idle worker
 678 * @pool: worker pool to wake worker from
 679 *
 680 * Wake up the first idle worker of @pool.
 681 *
 682 * CONTEXT:
 683 * spin_lock_irq(gcwq->lock).
 684 */
 685static void wake_up_worker(struct worker_pool *pool)
 686{
 687        struct worker *worker = first_worker(pool);
 688
 689        if (likely(worker))
 690                wake_up_process(worker->task);
 691}
 692
 693/**
 694 * wq_worker_waking_up - a worker is waking up
 695 * @task: task waking up
 696 * @cpu: CPU @task is waking up to
 697 *
 698 * This function is called during try_to_wake_up() when a worker is
 699 * being awoken.
 700 *
 701 * CONTEXT:
 702 * spin_lock_irq(rq->lock)
 703 */
 704void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
 705{
 706        struct worker *worker = kthread_data(task);
 707
 708        if (!(worker->flags & WORKER_NOT_RUNNING))
 709                atomic_inc(get_pool_nr_running(worker->pool));
 710}
 711
 712/**
 713 * wq_worker_sleeping - a worker is going to sleep
 714 * @task: task going to sleep
 715 * @cpu: CPU in question, must be the current CPU number
 716 *
 717 * This function is called during schedule() when a busy worker is
 718 * going to sleep.  Another worker on the same cpu can be woken up by
 719 * returning a pointer to its task.
 720 *
 721 * CONTEXT:
 722 * spin_lock_irq(rq->lock)
 723 *
 724 * RETURNS:
 725 * Worker task on @cpu to wake up, %NULL if none.
 726 */
 727struct task_struct *wq_worker_sleeping(struct task_struct *task,
 728                                       unsigned int cpu)
 729{
 730        struct worker *worker = kthread_data(task), *to_wakeup = NULL;
 731        struct worker_pool *pool = worker->pool;
 732        atomic_t *nr_running = get_pool_nr_running(pool);
 733
 734        if (worker->flags & WORKER_NOT_RUNNING)
 735                return NULL;
 736
 737        /* this can only happen on the local cpu */
 738        BUG_ON(cpu != raw_smp_processor_id());
 739
 740        /*
 741         * The counterpart of the following dec_and_test, implied mb,
 742         * worklist not empty test sequence is in insert_work().
 743         * Please read comment there.
 744         *
 745         * NOT_RUNNING is clear.  This means that we're bound to and
 746         * running on the local cpu w/ rq lock held and preemption
 747         * disabled, which in turn means that no one else could be
 748         * manipulating idle_list, so dereferencing idle_list without gcwq
 749         * lock is safe.
 750         */
 751        if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist))
 752                to_wakeup = first_worker(pool);
 753        return to_wakeup ? to_wakeup->task : NULL;
 754}
 755
 756/**
 757 * worker_set_flags - set worker flags and adjust nr_running accordingly
 758 * @worker: self
 759 * @flags: flags to set
 760 * @wakeup: wakeup an idle worker if necessary
 761 *
 762 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 763 * nr_running becomes zero and @wakeup is %true, an idle worker is
 764 * woken up.
 765 *
 766 * CONTEXT:
 767 * spin_lock_irq(gcwq->lock)
 768 */
 769static inline void worker_set_flags(struct worker *worker, unsigned int flags,
 770                                    bool wakeup)
 771{
 772        struct worker_pool *pool = worker->pool;
 773
 774        WARN_ON_ONCE(worker->task != current);
 775
 776        /*
 777         * If transitioning into NOT_RUNNING, adjust nr_running and
 778         * wake up an idle worker as necessary if requested by
 779         * @wakeup.
 780         */
 781        if ((flags & WORKER_NOT_RUNNING) &&
 782            !(worker->flags & WORKER_NOT_RUNNING)) {
 783                atomic_t *nr_running = get_pool_nr_running(pool);
 784
 785                if (wakeup) {
 786                        if (atomic_dec_and_test(nr_running) &&
 787                            !list_empty(&pool->worklist))
 788                                wake_up_worker(pool);
 789                } else
 790                        atomic_dec(nr_running);
 791        }
 792
 793        worker->flags |= flags;
 794}
 795
 796/**
 797 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 798 * @worker: self
 799 * @flags: flags to clear
 800 *
 801 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 802 *
 803 * CONTEXT:
 804 * spin_lock_irq(gcwq->lock)
 805 */
 806static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 807{
 808        struct worker_pool *pool = worker->pool;
 809        unsigned int oflags = worker->flags;
 810
 811        WARN_ON_ONCE(worker->task != current);
 812
 813        worker->flags &= ~flags;
 814
 815        /*
 816         * If transitioning out of NOT_RUNNING, increment nr_running.  Note
 817         * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
 818         * of multiple flags, not a single flag.
 819         */
 820        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 821                if (!(worker->flags & WORKER_NOT_RUNNING))
 822                        atomic_inc(get_pool_nr_running(pool));
 823}
 824
 825/**
 826 * busy_worker_head - return the busy hash head for a work
 827 * @gcwq: gcwq of interest
 828 * @work: work to be hashed
 829 *
 830 * Return hash head of @gcwq for @work.
 831 *
 832 * CONTEXT:
 833 * spin_lock_irq(gcwq->lock).
 834 *
 835 * RETURNS:
 836 * Pointer to the hash head.
 837 */
 838static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
 839                                           struct work_struct *work)
 840{
 841        const int base_shift = ilog2(sizeof(struct work_struct));
 842        unsigned long v = (unsigned long)work;
 843
 844        /* simple shift and fold hash, do we need something better? */
 845        v >>= base_shift;
 846        v += v >> BUSY_WORKER_HASH_ORDER;
 847        v &= BUSY_WORKER_HASH_MASK;
 848
 849        return &gcwq->busy_hash[v];
 850}
 851
 852/**
 853 * __find_worker_executing_work - find worker which is executing a work
 854 * @gcwq: gcwq of interest
 855 * @bwh: hash head as returned by busy_worker_head()
 856 * @work: work to find worker for
 857 *
 858 * Find a worker which is executing @work on @gcwq.  @bwh should be
 859 * the hash head obtained by calling busy_worker_head() with the same
 860 * work.
 861 *
 862 * CONTEXT:
 863 * spin_lock_irq(gcwq->lock).
 864 *
 865 * RETURNS:
 866 * Pointer to worker which is executing @work if found, NULL
 867 * otherwise.
 868 */
 869static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
 870                                                   struct hlist_head *bwh,
 871                                                   struct work_struct *work)
 872{
 873        struct worker *worker;
 874        struct hlist_node *tmp;
 875
 876        hlist_for_each_entry(worker, tmp, bwh, hentry)
 877                if (worker->current_work == work)
 878                        return worker;
 879        return NULL;
 880}
 881
 882/**
 883 * find_worker_executing_work - find worker which is executing a work
 884 * @gcwq: gcwq of interest
 885 * @work: work to find worker for
 886 *
 887 * Find a worker which is executing @work on @gcwq.  This function is
 888 * identical to __find_worker_executing_work() except that this
 889 * function calculates @bwh itself.
 890 *
 891 * CONTEXT:
 892 * spin_lock_irq(gcwq->lock).
 893 *
 894 * RETURNS:
 895 * Pointer to worker which is executing @work if found, NULL
 896 * otherwise.
 897 */
 898static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
 899                                                 struct work_struct *work)
 900{
 901        return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
 902                                            work);
 903}
 904
 905/**
 906 * insert_work - insert a work into gcwq
 907 * @cwq: cwq @work belongs to
 908 * @work: work to insert
 909 * @head: insertion point
 910 * @extra_flags: extra WORK_STRUCT_* flags to set
 911 *
 912 * Insert @work which belongs to @cwq into @gcwq after @head.
 913 * @extra_flags is or'd to work_struct flags.
 914 *
 915 * CONTEXT:
 916 * spin_lock_irq(gcwq->lock).
 917 */
 918static void insert_work(struct cpu_workqueue_struct *cwq,
 919                        struct work_struct *work, struct list_head *head,
 920                        unsigned int extra_flags)
 921{
 922        struct worker_pool *pool = cwq->pool;
 923
 924        /* we own @work, set data and link */
 925        set_work_cwq(work, cwq, extra_flags);
 926
 927        /*
 928         * Ensure that we get the right work->data if we see the
 929         * result of list_add() below, see try_to_grab_pending().
 930         */
 931        smp_wmb();
 932
 933        list_add_tail(&work->entry, head);
 934
 935        /*
 936         * Ensure either wq_worker_sleeping() sees the above
 937         * list_add_tail() or we see zero nr_running to avoid workers
 938         * lying around lazily while there are works to be processed.
 939         */
 940        smp_mb();
 941
 942        if (__need_more_worker(pool))
 943                wake_up_worker(pool);
 944}
 945
 946/*
 947 * Test whether @work is being queued from another work executing on the
 948 * same workqueue.  This is rather expensive and should only be used from
 949 * cold paths.
 950 */
 951static bool is_chained_work(struct workqueue_struct *wq)
 952{
 953        unsigned long flags;
 954        unsigned int cpu;
 955
 956        for_each_gcwq_cpu(cpu) {
 957                struct global_cwq *gcwq = get_gcwq(cpu);
 958                struct worker *worker;
 959                struct hlist_node *pos;
 960                int i;
 961
 962                spin_lock_irqsave(&gcwq->lock, flags);
 963                for_each_busy_worker(worker, i, pos, gcwq) {
 964                        if (worker->task != current)
 965                                continue;
 966                        spin_unlock_irqrestore(&gcwq->lock, flags);
 967                        /*
 968                         * I'm @worker, no locking necessary.  See if @work
 969                         * is headed to the same workqueue.
 970                         */
 971                        return worker->current_cwq->wq == wq;
 972                }
 973                spin_unlock_irqrestore(&gcwq->lock, flags);
 974        }
 975        return false;
 976}
 977
 978static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 979                         struct work_struct *work)
 980{
 981        struct global_cwq *gcwq;
 982        struct cpu_workqueue_struct *cwq;
 983        struct list_head *worklist;
 984        unsigned int work_flags;
 985        unsigned long flags;
 986
 987        debug_work_activate(work);
 988
 989        /* if dying, only works from the same workqueue are allowed */
 990        if (unlikely(wq->flags & WQ_DRAINING) &&
 991            WARN_ON_ONCE(!is_chained_work(wq)))
 992                return;
 993
 994        /* determine gcwq to use */
 995        if (!(wq->flags & WQ_UNBOUND)) {
 996                struct global_cwq *last_gcwq;
 997
 998                if (unlikely(cpu == WORK_CPU_UNBOUND))
 999                        cpu = raw_smp_processor_id();
1000
1001                /*
1002                 * It's multi cpu.  If @wq is non-reentrant and @work
1003                 * was previously on a different cpu, it might still
1004                 * be running there, in which case the work needs to
1005                 * be queued on that cpu to guarantee non-reentrance.
1006                 */
1007                gcwq = get_gcwq(cpu);
1008                if (wq->flags & WQ_NON_REENTRANT &&
1009                    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
1010                        struct worker *worker;
1011
1012                        spin_lock_irqsave(&last_gcwq->lock, flags);
1013
1014                        worker = find_worker_executing_work(last_gcwq, work);
1015
1016                        if (worker && worker->current_cwq->wq == wq)
1017                                gcwq = last_gcwq;
1018                        else {
1019                                /* meh... not running there, queue here */
1020                                spin_unlock_irqrestore(&last_gcwq->lock, flags);
1021                                spin_lock_irqsave(&gcwq->lock, flags);
1022                        }
1023                } else
1024                        spin_lock_irqsave(&gcwq->lock, flags);
1025        } else {
1026                gcwq = get_gcwq(WORK_CPU_UNBOUND);
1027                spin_lock_irqsave(&gcwq->lock, flags);
1028        }
1029
1030        /* gcwq determined, get cwq and queue */
1031        cwq = get_cwq(gcwq->cpu, wq);
1032        trace_workqueue_queue_work(cpu, cwq, work);
1033
1034        if (WARN_ON(!list_empty(&work->entry))) {
1035                spin_unlock_irqrestore(&gcwq->lock, flags);
1036                return;
1037        }
1038
1039        cwq->nr_in_flight[cwq->work_color]++;
1040        work_flags = work_color_to_flags(cwq->work_color);
1041
1042        if (likely(cwq->nr_active < cwq->max_active)) {
1043                trace_workqueue_activate_work(work);
1044                cwq->nr_active++;
1045                worklist = &cwq->pool->worklist;
1046        } else {
1047                work_flags |= WORK_STRUCT_DELAYED;
1048                worklist = &cwq->delayed_works;
1049        }
1050
1051        insert_work(cwq, work, worklist, work_flags);
1052
1053        spin_unlock_irqrestore(&gcwq->lock, flags);
1054}
1055
1056/**
1057 * queue_work - queue work on a workqueue
1058 * @wq: workqueue to use
1059 * @work: work to queue
1060 *
1061 * Returns 0 if @work was already on a queue, non-zero otherwise.
1062 *
1063 * We queue the work to the CPU on which it was submitted, but if the CPU dies
1064 * it can be processed by another CPU.
1065 */
1066int queue_work(struct workqueue_struct *wq, struct work_struct *work)
1067{
1068        int ret;
1069
1070        ret = queue_work_on(get_cpu(), wq, work);
1071        put_cpu();
1072
1073        return ret;
1074}
1075EXPORT_SYMBOL_GPL(queue_work);
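
/*
 * Illustrative sketch, not part of the original file: queueing onto a
 * dedicated workqueue instead of the shared system_wq.  All example_*
 * names are hypothetical.
 */
static struct workqueue_struct *example_wq;
static struct work_struct example_item;

static void example_wq_fn(struct work_struct *work)
{
        pr_info("ran on a dedicated workqueue\n");
}

static int example_wq_init(void)
{
        /* max_active of 1: at most one item in flight per CPU; freezable across suspend */
        example_wq = alloc_workqueue("example_wq", WQ_FREEZABLE, 1);
        if (!example_wq)
                return -ENOMEM;

        INIT_WORK(&example_item, example_wq_fn);
        queue_work(example_wq, &example_item);
        return 0;
}

static void example_wq_exit(void)
{
        cancel_work_sync(&example_item);        /* or flush_workqueue(example_wq) */
        destroy_workqueue(example_wq);
}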
1076
1077/**
1078 * queue_work_on - queue work on specific cpu
1079 * @cpu: CPU number to execute work on
1080 * @wq: workqueue to use
1081 * @work: work to queue
1082 *
1083 * Returns 0 if @work was already on a queue, non-zero otherwise.
1084 *
1085 * We queue the work to a specific CPU, the caller must ensure it
1086 * can't go away.
1087 */
1088int
1089queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
1090{
1091        int ret = 0;
1092
1093        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1094                __queue_work(cpu, wq, work);
1095                ret = 1;
1096        }
1097        return ret;
1098}
1099EXPORT_SYMBOL_GPL(queue_work_on);
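
/*
 * Illustrative sketch, not part of the original file: running a work item
 * on every online CPU with queue_work_on().  As the comment above says,
 * the caller must keep the CPU from going away, hence the
 * get_online_cpus() bracket.  The example_* names are hypothetical.
 */
static DEFINE_PER_CPU(struct work_struct, example_cpu_work);

static void example_cpu_work_fn(struct work_struct *work)
{
        pr_info("per-cpu example work ran\n");
}

static void example_run_on_each_cpu(void)
{
        unsigned int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                struct work_struct *work = &per_cpu(example_cpu_work, cpu);

                INIT_WORK(work, example_cpu_work_fn);
                queue_work_on(cpu, system_wq, work);
        }
        put_online_cpus();
}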
1100
1101static void delayed_work_timer_fn(unsigned long __data)
1102{
1103        struct delayed_work *dwork = (struct delayed_work *)__data;
1104        struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
1105
1106        __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
1107}
1108
1109/**
1110 * queue_delayed_work - queue work on a workqueue after delay
1111 * @wq: workqueue to use
1112 * @dwork: delayable work to queue
1113 * @delay: number of jiffies to wait before queueing
1114 *
1115 * Returns 0 if @work was already on a queue, non-zero otherwise.
1116 */
1117int queue_delayed_work(struct workqueue_struct *wq,
1118                        struct delayed_work *dwork, unsigned long delay)
1119{
1120        if (delay == 0)
1121                return queue_work(wq, &dwork->work);
1122
1123        return queue_delayed_work_on(-1, wq, dwork, delay);
1124}
1125EXPORT_SYMBOL_GPL(queue_delayed_work);
1126
1127/**
1128 * queue_delayed_work_on - queue work on specific CPU after delay
1129 * @cpu: CPU number to execute work on
1130 * @wq: workqueue to use
1131 * @dwork: work to queue
1132 * @delay: number of jiffies to wait before queueing
1133 *
1134 * Returns 0 if @work was already on a queue, non-zero otherwise.
1135 */
1136int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1137                        struct delayed_work *dwork, unsigned long delay)
1138{
1139        int ret = 0;
1140        struct timer_list *timer = &dwork->timer;
1141        struct work_struct *work = &dwork->work;
1142
1143        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1144                unsigned int lcpu;
1145
1146                BUG_ON(timer_pending(timer));
1147                BUG_ON(!list_empty(&work->entry));
1148
1149                timer_stats_timer_set_start_info(&dwork->timer);
1150
1151                /*
1152                 * This stores cwq for the moment, for the timer_fn.
1153                 * Note that the work's gcwq is preserved to allow
1154                 * reentrance detection for delayed works.
1155                 */
1156                if (!(wq->flags & WQ_UNBOUND)) {
1157                        struct global_cwq *gcwq = get_work_gcwq(work);
1158
1159                        if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
1160                                lcpu = gcwq->cpu;
1161                        else
1162                                lcpu = raw_smp_processor_id();
1163                } else
1164                        lcpu = WORK_CPU_UNBOUND;
1165
1166                set_work_cwq(work, get_cwq(lcpu, wq), 0);
1167
1168                timer->expires = jiffies + delay;
1169                timer->data = (unsigned long)dwork;
1170                timer->function = delayed_work_timer_fn;
1171
1172                if (unlikely(cpu >= 0))
1173                        add_timer_on(timer, cpu);
1174                else
1175                        add_timer(timer);
1176                ret = 1;
1177        }
1178        return ret;
1179}
1180EXPORT_SYMBOL_GPL(queue_delayed_work_on);
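
/*
 * Illustrative sketch, not part of the original file: a self-rearming
 * poll built on queue_delayed_work().  queue_delayed_work_on() works the
 * same way except the timer and the work are pinned to the given CPU.
 * The example_* names are hypothetical.
 */
static struct delayed_work example_poll;

static void example_poll_fn(struct work_struct *work)
{
        /* ... sample whatever needs polling ... */

        /* re-arm so it runs again roughly once per second */
        queue_delayed_work(system_wq, &example_poll, HZ);
}

static void example_poll_start(void)
{
        INIT_DELAYED_WORK(&example_poll, example_poll_fn);
        queue_delayed_work(system_wq, &example_poll, HZ);
}

static void example_poll_stop(void)
{
        /* safe even though the handler re-queues itself */
        cancel_delayed_work_sync(&example_poll);
}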
1181
1182/**
1183 * worker_enter_idle - enter idle state
1184 * @worker: worker which is entering idle state
1185 *
1186 * @worker is entering idle state.  Update stats and idle timer if
1187 * necessary.
1188 *
1189 * LOCKING:
1190 * spin_lock_irq(gcwq->lock).
1191 */
1192static void worker_enter_idle(struct worker *worker)
1193{
1194        struct worker_pool *pool = worker->pool;
1195        struct global_cwq *gcwq = pool->gcwq;
1196
1197        BUG_ON(worker->flags & WORKER_IDLE);
1198        BUG_ON(!list_empty(&worker->entry) &&
1199               (worker->hentry.next || worker->hentry.pprev));
1200
1201        /* can't use worker_set_flags(), also called from start_worker() */
1202        worker->flags |= WORKER_IDLE;
1203        pool->nr_idle++;
1204        worker->last_active = jiffies;
1205
1206        /* idle_list is LIFO */
1207        list_add(&worker->entry, &pool->idle_list);
1208
1209        if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1210                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1211
1212        /*
1213         * Sanity check nr_running.  Because gcwq_unbind_fn() releases
1214         * gcwq->lock between setting %WORKER_UNBOUND and zapping
1215         * nr_running, the warning may trigger spuriously.  Check iff
1216         * unbind is not in progress.
1217         */
1218        WARN_ON_ONCE(!(gcwq->flags & GCWQ_DISASSOCIATED) &&
1219                     pool->nr_workers == pool->nr_idle &&
1220                     atomic_read(get_pool_nr_running(pool)));
1221}
1222
1223/**
1224 * worker_leave_idle - leave idle state
1225 * @worker: worker which is leaving idle state
1226 *
1227 * @worker is leaving idle state.  Update stats.
1228 *
1229 * LOCKING:
1230 * spin_lock_irq(gcwq->lock).
1231 */
1232static void worker_leave_idle(struct worker *worker)
1233{
1234        struct worker_pool *pool = worker->pool;
1235
1236        BUG_ON(!(worker->flags & WORKER_IDLE));
1237        worker_clr_flags(worker, WORKER_IDLE);
1238        pool->nr_idle--;
1239        list_del_init(&worker->entry);
1240}
1241
1242/**
1243 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1244 * @worker: self
1245 *
1246 * Works which are scheduled while the cpu is online must at least be
1247 * scheduled to a worker which is bound to the cpu so that if they are
1248 * flushed from cpu callbacks while cpu is going down, they are
1249 * guaranteed to execute on the cpu.
1250 *
1251 * This function is to be used by rogue workers and rescuers to bind
1252 * themselves to the target cpu and may race with cpu going down or
1253 * coming online.  kthread_bind() can't be used because it may put the
1254 * worker on an already dead cpu and set_cpus_allowed_ptr() can't be used
1255 * verbatim as it's best effort and blocking and gcwq may be
1256 * [dis]associated in the meantime.
1257 *
1258 * This function tries set_cpus_allowed() and locks gcwq and verifies the
1259 * binding against %GCWQ_DISASSOCIATED which is set during
1260 * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
1261 * enters idle state or fetches works without dropping lock, it can
1262 * guarantee the scheduling requirement described in the first paragraph.
1263 *
1264 * CONTEXT:
1265 * Might sleep.  Called without any lock but returns with gcwq->lock
1266 * held.
1267 *
1268 * RETURNS:
1269 * %true if the associated gcwq is online (@worker is successfully
1270 * bound), %false if offline.
1271 */
1272static bool worker_maybe_bind_and_lock(struct worker *worker)
1273__acquires(&gcwq->lock)
1274{
1275        struct global_cwq *gcwq = worker->pool->gcwq;
1276        struct task_struct *task = worker->task;
1277
1278        while (true) {
1279                /*
1280                 * The following call may fail, succeed or succeed
1281                 * without actually migrating the task to the cpu if
1282                 * it races with cpu hotunplug operation.  Verify
1283                 * against GCWQ_DISASSOCIATED.
1284                 */
1285                if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1286                        set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1287
1288                spin_lock_irq(&gcwq->lock);
1289                if (gcwq->flags & GCWQ_DISASSOCIATED)
1290                        return false;
1291                if (task_cpu(task) == gcwq->cpu &&
1292                    cpumask_equal(&current->cpus_allowed,
1293                                  get_cpu_mask(gcwq->cpu)))
1294                        return true;
1295                spin_unlock_irq(&gcwq->lock);
1296
1297                /*
1298                 * We've raced with CPU hot[un]plug.  Give it a breather
1299                 * and retry migration.  cond_resched() is required here;
1300                 * otherwise, we might deadlock against cpu_stop trying to
1301                 * bring down the CPU on non-preemptive kernel.
1302                 */
1303                cpu_relax();
1304                cond_resched();
1305        }
1306}
1307
1308struct idle_rebind {
1309        int                     cnt;            /* # workers to be rebound */
1310        struct completion       done;           /* all workers rebound */
1311};
1312
1313/*
1314 * Rebind an idle @worker to its CPU.  During CPU onlining, this has to
1315 * happen synchronously for idle workers.  worker_thread() will test
1316 * %WORKER_REBIND before leaving idle and call this function.
1317 */
1318static void idle_worker_rebind(struct worker *worker)
1319{
1320        struct global_cwq *gcwq = worker->pool->gcwq;
1321
1322        /* CPU must be online at this point */
1323        WARN_ON(!worker_maybe_bind_and_lock(worker));
1324        if (!--worker->idle_rebind->cnt)
1325                complete(&worker->idle_rebind->done);
1326        spin_unlock_irq(&worker->pool->gcwq->lock);
1327
1328        /* we did our part, wait for rebind_workers() to finish up */
1329        wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND));
1330
1331        /*
1332         * rebind_workers() shouldn't finish until all workers passed the
1333         * above WORKER_REBIND wait.  Tell it when done.
1334         */
1335        spin_lock_irq(&worker->pool->gcwq->lock);
1336        if (!--worker->idle_rebind->cnt)
1337                complete(&worker->idle_rebind->done);
1338        spin_unlock_irq(&worker->pool->gcwq->lock);
1339}
1340
1341/*
1342 * Function for @worker->rebind.work used to rebind unbound busy workers to
1343 * the associated cpu which is coming back online.  This is scheduled by
1344 * cpu up but can race with other cpu hotplug operations and may be
1345 * executed twice without intervening cpu down.
1346 */
1347static void busy_worker_rebind_fn(struct work_struct *work)
1348{
1349        struct worker *worker = container_of(work, struct worker, rebind_work);
1350        struct global_cwq *gcwq = worker->pool->gcwq;
1351
1352        worker_maybe_bind_and_lock(worker);
1353
1354        /*
1355         * %WORKER_REBIND must be cleared even if the above binding failed;
1356         * otherwise, we may confuse the next CPU_UP cycle or oops / get
1357         * stuck by calling idle_worker_rebind() prematurely.  If CPU went
1358         * down again in between, %WORKER_UNBOUND would be set, so clearing
1359         * %WORKER_REBIND is always safe.
1360         */
1361        worker_clr_flags(worker, WORKER_REBIND);
1362
1363        spin_unlock_irq(&gcwq->lock);
1364}
1365
1366/**
1367 * rebind_workers - rebind all workers of a gcwq to the associated CPU
1368 * @gcwq: gcwq of interest
1369 *
1370 * @gcwq->cpu is coming online.  Rebind all workers to the CPU.  Rebinding
1371 * is different for idle and busy ones.
1372 *
1373 * The idle ones should be rebound synchronously and idle rebinding should
1374 * be complete before any worker starts executing work items with
1375 * concurrency management enabled; otherwise, scheduler may oops trying to
1376 * wake up non-local idle worker from wq_worker_sleeping().
1377 *
1378 * This is achieved by repeatedly requesting rebinding until all idle
1379 * workers are known to have been rebound under @gcwq->lock and holding all
1380 * idle workers from becoming busy until idle rebinding is complete.
1381 *
1382 * Once idle workers are rebound, busy workers can be rebound as they
1383 * finish executing their current work items.  Queueing the rebind work at
1384 * the head of their scheduled lists is enough.  Note that nr_running will
1385 * be properly bumped as busy workers rebind.
1386 *
1387 * On return, all workers are guaranteed to either be bound or have rebind
1388 * work item scheduled.
1389 */
1390static void rebind_workers(struct global_cwq *gcwq)
1391        __releases(&gcwq->lock) __acquires(&gcwq->lock)
1392{
1393        struct idle_rebind idle_rebind;
1394        struct worker_pool *pool;
1395        struct worker *worker;
1396        struct hlist_node *pos;
1397        int i;
1398
1399        lockdep_assert_held(&gcwq->lock);
1400
1401        for_each_worker_pool(pool, gcwq)
1402                lockdep_assert_held(&pool->manager_mutex);
1403
1404        /*
1405         * Rebind idle workers.  Interlocked both ways.  We wait for
1406         * workers to rebind via @idle_rebind.done.  Workers will wait for
1407         * us to finish up by watching %WORKER_REBIND.
1408         */
1409        init_completion(&idle_rebind.done);
1410retry:
1411        idle_rebind.cnt = 1;
1412        INIT_COMPLETION(idle_rebind.done);
1413
1414        /* set REBIND and kick idle ones, we'll wait for these later */
1415        for_each_worker_pool(pool, gcwq) {
1416                list_for_each_entry(worker, &pool->idle_list, entry) {
1417                        unsigned long worker_flags = worker->flags;
1418
1419                        if (worker->flags & WORKER_REBIND)
1420                                continue;
1421
1422                        /* morph UNBOUND to REBIND atomically */
1423                        worker_flags &= ~WORKER_UNBOUND;
1424                        worker_flags |= WORKER_REBIND;
1425                        ACCESS_ONCE(worker->flags) = worker_flags;
1426
1427                        idle_rebind.cnt++;
1428                        worker->idle_rebind = &idle_rebind;
1429
1430                        /* worker_thread() will call idle_worker_rebind() */
1431                        wake_up_process(worker->task);
1432                }
1433        }
1434
1435        if (--idle_rebind.cnt) {
1436                spin_unlock_irq(&gcwq->lock);
1437                wait_for_completion(&idle_rebind.done);
1438                spin_lock_irq(&gcwq->lock);
1439                /* busy ones might have become idle while waiting, retry */
1440                goto retry;
1441        }
1442
1443        /* all idle workers are rebound, rebind busy workers */
1444        for_each_busy_worker(worker, i, pos, gcwq) {
1445                struct work_struct *rebind_work = &worker->rebind_work;
1446                unsigned long worker_flags = worker->flags;
1447
1448                /* morph UNBOUND to REBIND atomically */
1449                worker_flags &= ~WORKER_UNBOUND;
1450                worker_flags |= WORKER_REBIND;
1451                ACCESS_ONCE(worker->flags) = worker_flags;
1452
1453                if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
1454                                     work_data_bits(rebind_work)))
1455                        continue;
1456
1457                /* wq doesn't matter, use the default one */
1458                debug_work_activate(rebind_work);
1459                insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
1460                            worker->scheduled.next,
1461                            work_color_to_flags(WORK_NO_COLOR));
1462        }
1463
1464        /*
1465         * All idle workers are rebound and waiting for %WORKER_REBIND to
1466         * be cleared inside idle_worker_rebind().  Clear and release.
1467         * Clearing %WORKER_REBIND from this foreign context is safe
1468         * because these workers are still guaranteed to be idle.
1469         *
1470         * We need to make sure all idle workers have passed the WORKER_REBIND
1471         * wait in idle_worker_rebind() before returning; otherwise, workers
1472         * can get stuck at the wait if the hotplug cycle repeats.
1473         */
1474        idle_rebind.cnt = 1;
1475        INIT_COMPLETION(idle_rebind.done);
1476
1477        for_each_worker_pool(pool, gcwq) {
1478                list_for_each_entry(worker, &pool->idle_list, entry) {
1479                        worker->flags &= ~WORKER_REBIND;
1480                        idle_rebind.cnt++;
1481                }
1482        }
1483
1484        wake_up_all(&gcwq->rebind_hold);
1485
1486        if (--idle_rebind.cnt) {
1487                spin_unlock_irq(&gcwq->lock);
1488                wait_for_completion(&idle_rebind.done);
1489                spin_lock_irq(&gcwq->lock);
1490        }
1491}
1492
1493static struct worker *alloc_worker(void)
1494{
1495        struct worker *worker;
1496
1497        worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1498        if (worker) {
1499                INIT_LIST_HEAD(&worker->entry);
1500                INIT_LIST_HEAD(&worker->scheduled);
1501                INIT_WORK(&worker->rebind_work, busy_worker_rebind_fn);
1502                /* on creation a worker is in !idle && prep state */
1503                worker->flags = WORKER_PREP;
1504        }
1505        return worker;
1506}
1507
1508/**
1509 * create_worker - create a new workqueue worker
1510 * @pool: pool the new worker will belong to
1511 *
1512 * Create a new worker which is bound to @pool.  The returned worker
1513 * can be started by calling start_worker() or destroyed using
1514 * destroy_worker().
1515 *
1516 * CONTEXT:
1517 * Might sleep.  Does GFP_KERNEL allocations.
1518 *
1519 * RETURNS:
1520 * Pointer to the newly created worker.
1521 */
1522static struct worker *create_worker(struct worker_pool *pool)
1523{
1524        struct global_cwq *gcwq = pool->gcwq;
1525        const char *pri = worker_pool_pri(pool) ? "H" : "";
1526        struct worker *worker = NULL;
1527        int id = -1;
1528
1529        spin_lock_irq(&gcwq->lock);
1530        while (ida_get_new(&pool->worker_ida, &id)) {
1531                spin_unlock_irq(&gcwq->lock);
1532                if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
1533                        goto fail;
1534                spin_lock_irq(&gcwq->lock);
1535        }
1536        spin_unlock_irq(&gcwq->lock);
1537
1538        worker = alloc_worker();
1539        if (!worker)
1540                goto fail;
1541
1542        worker->pool = pool;
1543        worker->id = id;
1544
1545        if (gcwq->cpu != WORK_CPU_UNBOUND)
1546                worker->task = kthread_create_on_node(worker_thread,
1547                                        worker, cpu_to_node(gcwq->cpu),
1548                                        "kworker/%u:%d%s", gcwq->cpu, id, pri);
1549        else
1550                worker->task = kthread_create(worker_thread, worker,
1551                                              "kworker/u:%d%s", id, pri);
1552        if (IS_ERR(worker->task))
1553                goto fail;
1554
1555        if (worker_pool_pri(pool))
1556                set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
1557
1558        /*
1559         * Determine CPU binding of the new worker depending on
1560         * %GCWQ_DISASSOCIATED.  The caller is responsible for ensuring the
1561         * flag remains stable across this function.  See the comments
1562         * above the flag definition for details.
1563         *
1564         * As an unbound worker may later become a regular one if CPU comes
1565         * online, make sure every worker has %PF_THREAD_BOUND set.
1566         */
1567        if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
1568                kthread_bind(worker->task, gcwq->cpu);
1569        } else {
1570                worker->task->flags |= PF_THREAD_BOUND;
1571                worker->flags |= WORKER_UNBOUND;
1572        }
1573
1574        return worker;
1575fail:
1576        if (id >= 0) {
1577                spin_lock_irq(&gcwq->lock);
1578                ida_remove(&pool->worker_ida, id);
1579                spin_unlock_irq(&gcwq->lock);
1580        }
1581        kfree(worker);
1582        return NULL;
1583}
1584
1585/**
1586 * start_worker - start a newly created worker
1587 * @worker: worker to start
1588 *
1589 * Make the gcwq aware of @worker and start it.
1590 *
1591 * CONTEXT:
1592 * spin_lock_irq(gcwq->lock).
1593 */
1594static void start_worker(struct worker *worker)
1595{
1596        worker->flags |= WORKER_STARTED;
1597        worker->pool->nr_workers++;
1598        worker_enter_idle(worker);
1599        wake_up_process(worker->task);
1600}
1601
1602/**
1603 * destroy_worker - destroy a workqueue worker
1604 * @worker: worker to be destroyed
1605 *
1606 * Destroy @worker and adjust @gcwq stats accordingly.
1607 *
1608 * CONTEXT:
1609 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1610 */
1611static void destroy_worker(struct worker *worker)
1612{
1613        struct worker_pool *pool = worker->pool;
1614        struct global_cwq *gcwq = pool->gcwq;
1615        int id = worker->id;
1616
1617        /* sanity check frenzy */
1618        BUG_ON(worker->current_work);
1619        BUG_ON(!list_empty(&worker->scheduled));
1620
1621        if (worker->flags & WORKER_STARTED)
1622                pool->nr_workers--;
1623        if (worker->flags & WORKER_IDLE)
1624                pool->nr_idle--;
1625
1626        list_del_init(&worker->entry);
1627        worker->flags |= WORKER_DIE;
1628
1629        spin_unlock_irq(&gcwq->lock);
1630
1631        kthread_stop(worker->task);
1632        kfree(worker);
1633
1634        spin_lock_irq(&gcwq->lock);
1635        ida_remove(&pool->worker_ida, id);
1636}
1637
1638static void idle_worker_timeout(unsigned long __pool)
1639{
1640        struct worker_pool *pool = (void *)__pool;
1641        struct global_cwq *gcwq = pool->gcwq;
1642
1643        spin_lock_irq(&gcwq->lock);
1644
1645        if (too_many_workers(pool)) {
1646                struct worker *worker;
1647                unsigned long expires;
1648
1649                /* idle_list is kept in LIFO order, check the last one */
1650                worker = list_entry(pool->idle_list.prev, struct worker, entry);
1651                expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1652
1653                if (time_before(jiffies, expires))
1654                        mod_timer(&pool->idle_timer, expires);
1655                else {
1656                        /* it's been idle for too long, wake up manager */
1657                        pool->flags |= POOL_MANAGE_WORKERS;
1658                        wake_up_worker(pool);
1659                }
1660        }
1661
1662        spin_unlock_irq(&gcwq->lock);
1663}
1664
1665static bool send_mayday(struct work_struct *work)
1666{
1667        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1668        struct workqueue_struct *wq = cwq->wq;
1669        unsigned int cpu;
1670
1671        if (!(wq->flags & WQ_RESCUER))
1672                return false;
1673
1674        /* mayday mayday mayday */
1675        cpu = cwq->pool->gcwq->cpu;
1676        /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1677        if (cpu == WORK_CPU_UNBOUND)
1678                cpu = 0;
1679        if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1680                wake_up_process(wq->rescuer->task);
1681        return true;
1682}
1683
1684static void gcwq_mayday_timeout(unsigned long __pool)
1685{
1686        struct worker_pool *pool = (void *)__pool;
1687        struct global_cwq *gcwq = pool->gcwq;
1688        struct work_struct *work;
1689
1690        spin_lock_irq(&gcwq->lock);
1691
1692        if (need_to_create_worker(pool)) {
1693                /*
1694                 * We've been trying to create a new worker but
1695                 * haven't been successful.  We might be hitting an
1696                 * allocation deadlock.  Send distress signals to
1697                 * rescuers.
1698                 */
1699                list_for_each_entry(work, &pool->worklist, entry)
1700                        send_mayday(work);
1701        }
1702
1703        spin_unlock_irq(&gcwq->lock);
1704
1705        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1706}
1707
1708/**
1709 * maybe_create_worker - create a new worker if necessary
1710 * @pool: pool to create a new worker for
1711 *
1712 * Create a new worker for @pool if necessary.  @pool is guaranteed to
1713 * have at least one idle worker on return from this function.  If
1714 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1715 * sent to all rescuers with works scheduled on @pool to resolve
1716 * possible allocation deadlock.
1717 *
1718 * On return, need_to_create_worker() is guaranteed to be false and
1719 * may_start_working() true.
1720 *
1721 * LOCKING:
1722 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1723 * multiple times.  Does GFP_KERNEL allocations.  Called only from
1724 * manager.
1725 *
1726 * RETURNS:
1727 * false if no action was taken and gcwq->lock stayed locked, true
1728 * otherwise.
1729 */
1730static bool maybe_create_worker(struct worker_pool *pool)
1731__releases(&gcwq->lock)
1732__acquires(&gcwq->lock)
1733{
1734        struct global_cwq *gcwq = pool->gcwq;
1735
1736        if (!need_to_create_worker(pool))
1737                return false;
1738restart:
1739        spin_unlock_irq(&gcwq->lock);
1740
1741        /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1742        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1743
1744        while (true) {
1745                struct worker *worker;
1746
1747                worker = create_worker(pool);
1748                if (worker) {
1749                        del_timer_sync(&pool->mayday_timer);
1750                        spin_lock_irq(&gcwq->lock);
1751                        start_worker(worker);
1752                        BUG_ON(need_to_create_worker(pool));
1753                        return true;
1754                }
1755
1756                if (!need_to_create_worker(pool))
1757                        break;
1758
1759                __set_current_state(TASK_INTERRUPTIBLE);
1760                schedule_timeout(CREATE_COOLDOWN);
1761
1762                if (!need_to_create_worker(pool))
1763                        break;
1764        }
1765
1766        del_timer_sync(&pool->mayday_timer);
1767        spin_lock_irq(&gcwq->lock);
1768        if (need_to_create_worker(pool))
1769                goto restart;
1770        return true;
1771}
1772
1773/**
1774 * maybe_destroy_workers - destroy workers which have been idle for a while
1775 * @pool: pool to destroy workers for
1776 *
1777 * Destroy @pool workers which have been idle for longer than
1778 * IDLE_WORKER_TIMEOUT.
1779 *
1780 * LOCKING:
1781 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1782 * multiple times.  Called only from manager.
1783 *
1784 * RETURNS:
1785 * false if no action was taken and gcwq->lock stayed locked, true
1786 * otherwise.
1787 */
1788static bool maybe_destroy_workers(struct worker_pool *pool)
1789{
1790        bool ret = false;
1791
1792        while (too_many_workers(pool)) {
1793                struct worker *worker;
1794                unsigned long expires;
1795
1796                worker = list_entry(pool->idle_list.prev, struct worker, entry);
1797                expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1798
1799                if (time_before(jiffies, expires)) {
1800                        mod_timer(&pool->idle_timer, expires);
1801                        break;
1802                }
1803
1804                destroy_worker(worker);
1805                ret = true;
1806        }
1807
1808        return ret;
1809}
1810
1811/**
1812 * manage_workers - manage worker pool
1813 * @worker: self
1814 *
1815 * Assume the manager role and manage gcwq worker pool @worker belongs
1816 * to.  At any given time, there can be only zero or one manager per
1817 * gcwq.  The exclusion is handled automatically by this function.
1818 *
1819 * The caller can safely start processing works on false return.  On
1820 * true return, it's guaranteed that need_to_create_worker() is false
1821 * and may_start_working() is true.
1822 *
1823 * CONTEXT:
1824 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1825 * multiple times.  Does GFP_KERNEL allocations.
1826 *
1827 * RETURNS:
1828 * false if no action was taken and gcwq->lock stayed locked, true if
1829 * some action was taken.
1830 */
1831static bool manage_workers(struct worker *worker)
1832{
1833        struct worker_pool *pool = worker->pool;
1834        bool ret = false;
1835
1836        if (pool->flags & POOL_MANAGING_WORKERS)
1837                return ret;
1838
1839        pool->flags |= POOL_MANAGING_WORKERS;
1840
1841        /*
1842         * To simplify both worker management and CPU hotplug, hold off
1843         * management while hotplug is in progress.  CPU hotplug path can't
1844         * grab %POOL_MANAGING_WORKERS to achieve this because that can
1845         * lead to idle worker depletion (all become busy thinking someone
1846         * else is managing) which in turn can result in deadlock under
1847         * extreme circumstances.  Use @pool->manager_mutex to synchronize
1848         * manager against CPU hotplug.
1849         *
1850         * manager_mutex is always free unless CPU hotplug is in
1851         * progress, so trylock first without dropping @gcwq->lock.
1852         */
1853        if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
1854                spin_unlock_irq(&pool->gcwq->lock);
1855                mutex_lock(&pool->manager_mutex);
1856                /*
1857                 * CPU hotplug could have happened while we were waiting
1858                 * for manager_mutex.  Hotplug itself can't handle us
1859                 * because manager isn't either on idle or busy list, and
1860                 * @gcwq's state and ours could have deviated.
1861                 *
1862                 * As hotplug is now excluded via manager_mutex, we can
1863                 * simply try to bind.  It will succeed or fail depending
1864                 * on @gcwq's current state.  Try it and adjust
1865                 * %WORKER_UNBOUND accordingly.
1866                 */
1867                if (worker_maybe_bind_and_lock(worker))
1868                        worker->flags &= ~WORKER_UNBOUND;
1869                else
1870                        worker->flags |= WORKER_UNBOUND;
1871
1872                ret = true;
1873        }
1874
1875        pool->flags &= ~POOL_MANAGE_WORKERS;
1876
1877        /*
1878         * Destroy and then create so that may_start_working() is true
1879         * on return.
1880         */
1881        ret |= maybe_destroy_workers(pool);
1882        ret |= maybe_create_worker(pool);
1883
1884        pool->flags &= ~POOL_MANAGING_WORKERS;
1885        mutex_unlock(&pool->manager_mutex);
1886        return ret;
1887}
1888
1889/**
1890 * move_linked_works - move linked works to a list
1891 * @work: start of series of works to be scheduled
1892 * @head: target list to append @work to
1893 * @nextp: out parameter for nested worklist walking
1894 *
1895 * Schedule linked works starting from @work to @head.  Work series to
1896 * be scheduled starts at @work and includes any consecutive work with
1897 * WORK_STRUCT_LINKED set in its predecessor.
1898 *
1899 * If @nextp is not NULL, it's updated to point to the next work of
1900 * the last scheduled work.  This allows move_linked_works() to be
1901 * nested inside outer list_for_each_entry_safe().
1902 *
1903 * CONTEXT:
1904 * spin_lock_irq(gcwq->lock).
1905 */
1906static void move_linked_works(struct work_struct *work, struct list_head *head,
1907                              struct work_struct **nextp)
1908{
1909        struct work_struct *n;
1910
1911        /*
1912         * Linked worklist will always end before the end of the list,
1913         * use NULL for list head.
1914         */
1915        list_for_each_entry_safe_from(work, n, NULL, entry) {
1916                list_move_tail(&work->entry, head);
1917                if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1918                        break;
1919        }
1920
1921        /*
1922         * If we're already inside safe list traversal and have moved
1923         * multiple works to the scheduled queue, the next position
1924         * needs to be updated.
1925         */
1926        if (nextp)
1927                *nextp = n;
1928}
1929
1930static void cwq_activate_delayed_work(struct work_struct *work)
1931{
1932        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1933
1934        trace_workqueue_activate_work(work);
1935        move_linked_works(work, &cwq->pool->worklist, NULL);
1936        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1937        cwq->nr_active++;
1938}
1939
1940static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1941{
1942        struct work_struct *work = list_first_entry(&cwq->delayed_works,
1943                                                    struct work_struct, entry);
1944
1945        cwq_activate_delayed_work(work);
1946}
1947
1948/**
1949 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1950 * @cwq: cwq of interest
1951 * @color: color of work which left the queue
1952 * @delayed: for a delayed work
1953 *
1954 * A work either has completed or is removed from pending queue,
1955 * decrement nr_in_flight of its cwq and handle workqueue flushing.
1956 *
1957 * CONTEXT:
1958 * spin_lock_irq(gcwq->lock).
1959 */
1960static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1961                                 bool delayed)
1962{
1963        /* ignore uncolored works */
1964        if (color == WORK_NO_COLOR)
1965                return;
1966
1967        cwq->nr_in_flight[color]--;
1968
1969        if (!delayed) {
1970                cwq->nr_active--;
1971                if (!list_empty(&cwq->delayed_works)) {
1972                        /* one down, submit a delayed one */
1973                        if (cwq->nr_active < cwq->max_active)
1974                                cwq_activate_first_delayed(cwq);
1975                }
1976        }
1977
1978        /* is flush in progress and are we at the flushing tip? */
1979        if (likely(cwq->flush_color != color))
1980                return;
1981
1982        /* are there still in-flight works? */
1983        if (cwq->nr_in_flight[color])
1984                return;
1985
1986        /* this cwq is done, clear flush_color */
1987        cwq->flush_color = -1;
1988
1989        /*
1990         * If this was the last cwq, wake up the first flusher.  It
1991         * will handle the rest.
1992         */
1993        if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1994                complete(&cwq->wq->first_flusher->done);
1995}
1996
1997/**
1998 * process_one_work - process single work
1999 * @worker: self
2000 * @work: work to process
2001 *
2002 * Process @work.  This function contains all the logic necessary to
2003 * process a single work item, including synchronization against and
2004 * interaction with other workers on the same cpu, queueing and
2005 * flushing.  As long as the context requirement is met, any worker can
2006 * call this function to process a work item.
2007 *
2008 * CONTEXT:
2009 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
2010 */
2011static void process_one_work(struct worker *worker, struct work_struct *work)
2012__releases(&gcwq->lock)
2013__acquires(&gcwq->lock)
2014{
2015        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
2016        struct worker_pool *pool = worker->pool;
2017        struct global_cwq *gcwq = pool->gcwq;
2018        struct hlist_head *bwh = busy_worker_head(gcwq, work);
2019        bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
2020        work_func_t f = work->func;
2021        int work_color;
2022        struct worker *collision;
2023#ifdef CONFIG_LOCKDEP
2024        /*
2025         * It is permissible to free the struct work_struct from
2026         * inside the function that is called from it, this we need to
2027         * take into account for lockdep too.  To avoid bogus "held
2028         * lock freed" warnings as well as problems when looking into
2029         * work->lockdep_map, make a copy and use that here.
2030         */
2031        struct lockdep_map lockdep_map;
2032
2033        lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2034#endif
2035        /*
2036         * Ensure we're on the correct CPU.  DISASSOCIATED test is
2037         * necessary to avoid spurious warnings from rescuers servicing the
2038         * unbound or a disassociated gcwq.
2039         */
2040        WARN_ON_ONCE(!(worker->flags & (WORKER_UNBOUND | WORKER_REBIND)) &&
2041                     !(gcwq->flags & GCWQ_DISASSOCIATED) &&
2042                     raw_smp_processor_id() != gcwq->cpu);
2043
2044        /*
2045         * A single work shouldn't be executed concurrently by
2046         * multiple workers on a single cpu.  Check whether anyone is
2047         * already processing the work.  If so, defer the work to the
2048         * currently executing one.
2049         */
2050        collision = __find_worker_executing_work(gcwq, bwh, work);
2051        if (unlikely(collision)) {
2052                move_linked_works(work, &collision->scheduled, NULL);
2053                return;
2054        }
2055
2056        /* claim and process */
2057        debug_work_deactivate(work);
2058        hlist_add_head(&worker->hentry, bwh);
2059        worker->current_work = work;
2060        worker->current_cwq = cwq;
2061        work_color = get_work_color(work);
2062
2063        /* record the current cpu number in the work data and dequeue */
2064        set_work_cpu(work, gcwq->cpu);
2065        list_del_init(&work->entry);
2066
2067        /*
2068         * CPU intensive works don't participate in concurrency
2069         * management.  They're the scheduler's responsibility.
2070         */
2071        if (unlikely(cpu_intensive))
2072                worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
2073
2074        /*
2075         * Unbound gcwq isn't concurrency managed and work items should be
2076         * executed ASAP.  Wake up another worker if necessary.
2077         */
2078        if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
2079                wake_up_worker(pool);
2080
2081        spin_unlock_irq(&gcwq->lock);
2082
2083        smp_wmb();      /* paired with test_and_set_bit(PENDING) */
2084        work_clear_pending(work);
2085
2086        lock_map_acquire_read(&cwq->wq->lockdep_map);
2087        lock_map_acquire(&lockdep_map);
2088        trace_workqueue_execute_start(work);
2089        f(work);
2090        /*
2091         * While we must be careful to not use "work" after this, the trace
2092         * point will only record its address.
2093         */
2094        trace_workqueue_execute_end(work);
2095        lock_map_release(&lockdep_map);
2096        lock_map_release(&cwq->wq->lockdep_map);
2097
2098        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2099                printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
2100                       "%s/0x%08x/%d\n",
2101                       current->comm, preempt_count(), task_pid_nr(current));
2102                printk(KERN_ERR "    last function: ");
2103                print_symbol("%s\n", (unsigned long)f);
2104                debug_show_held_locks(current);
2105                dump_stack();
2106        }
2107
2108        spin_lock_irq(&gcwq->lock);
2109
2110        /* clear cpu intensive status */
2111        if (unlikely(cpu_intensive))
2112                worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2113
2114        /* we're done with it, release */
2115        hlist_del_init(&worker->hentry);
2116        worker->current_work = NULL;
2117        worker->current_cwq = NULL;
2118        cwq_dec_nr_in_flight(cwq, work_color, false);
2119}
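/*
 * Editorial example (not part of workqueue.c): the WORKER_CPU_INTENSIVE
 * handling above is what WQ_CPU_INTENSIVE workqueues rely on.  A driver
 * with a long-running, CPU-bound work item can keep it from stalling
 * concurrency management on its gcwq by queueing it on such a workqueue.
 * The names my_crunch_wq, crunch_work and crunch_fn are hypothetical.
 *
 *	static struct workqueue_struct *my_crunch_wq;
 *	static DECLARE_WORK(crunch_work, crunch_fn);
 *
 *	static int __init my_init(void)
 *	{
 *		my_crunch_wq = alloc_workqueue("my_crunch",
 *					       WQ_CPU_INTENSIVE, 0);
 *		if (!my_crunch_wq)
 *			return -ENOMEM;
 *		queue_work(my_crunch_wq, &crunch_work);
 *		return 0;
 *	}
 */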
2120
2121/**
2122 * process_scheduled_works - process scheduled works
2123 * @worker: self
2124 *
2125 * Process all scheduled works.  Please note that the scheduled list
2126 * may change while processing a work, so this function repeatedly
2127 * fetches a work from the top and executes it.
2128 *
2129 * CONTEXT:
2130 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2131 * multiple times.
2132 */
2133static void process_scheduled_works(struct worker *worker)
2134{
2135        while (!list_empty(&worker->scheduled)) {
2136                struct work_struct *work = list_first_entry(&worker->scheduled,
2137                                                struct work_struct, entry);
2138                process_one_work(worker, work);
2139        }
2140}
2141
2142/**
2143 * worker_thread - the worker thread function
2144 * @__worker: self
2145 *
2146 * The gcwq worker thread function.  There's a dynamic pool of these
2147 * per cpu.  These workers process all works regardless of their
2148 * specific target workqueue.  The only exception is works which belong
2149 * to workqueues with a rescuer, which is explained in
2150 * rescuer_thread().
2151 */
2152static int worker_thread(void *__worker)
2153{
2154        struct worker *worker = __worker;
2155        struct worker_pool *pool = worker->pool;
2156        struct global_cwq *gcwq = pool->gcwq;
2157
2158        /* tell the scheduler that this is a workqueue worker */
2159        worker->task->flags |= PF_WQ_WORKER;
2160woke_up:
2161        spin_lock_irq(&gcwq->lock);
2162
2163        /*
2164         * DIE can be set only while idle and REBIND set while busy has
2165         * @worker->rebind_work scheduled.  Checking here is enough.
2166         */
2167        if (unlikely(worker->flags & (WORKER_REBIND | WORKER_DIE))) {
2168                spin_unlock_irq(&gcwq->lock);
2169
2170                if (worker->flags & WORKER_DIE) {
2171                        worker->task->flags &= ~PF_WQ_WORKER;
2172                        return 0;
2173                }
2174
2175                idle_worker_rebind(worker);
2176                goto woke_up;
2177        }
2178
2179        worker_leave_idle(worker);
2180recheck:
2181        /* no more worker necessary? */
2182        if (!need_more_worker(pool))
2183                goto sleep;
2184
2185        /* do we need to manage? */
2186        if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2187                goto recheck;
2188
2189        /*
2190         * ->scheduled list can only be filled while a worker is
2191         * preparing to process a work or actually processing it.
2192         * Make sure nobody diddled with it while I was sleeping.
2193         */
2194        BUG_ON(!list_empty(&worker->scheduled));
2195
2196        /*
2197         * When control reaches this point, we're guaranteed to have
2198         * at least one idle worker or that someone else has already
2199         * assumed the manager role.
2200         */
2201        worker_clr_flags(worker, WORKER_PREP);
2202
2203        do {
2204                struct work_struct *work =
2205                        list_first_entry(&pool->worklist,
2206                                         struct work_struct, entry);
2207
2208                if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2209                        /* optimization path, not strictly necessary */
2210                        process_one_work(worker, work);
2211                        if (unlikely(!list_empty(&worker->scheduled)))
2212                                process_scheduled_works(worker);
2213                } else {
2214                        move_linked_works(work, &worker->scheduled, NULL);
2215                        process_scheduled_works(worker);
2216                }
2217        } while (keep_working(pool));
2218
2219        worker_set_flags(worker, WORKER_PREP, false);
2220sleep:
2221        if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))
2222                goto recheck;
2223
2224        /*
2225         * gcwq->lock is held and there's no work to process and no
2226         * need to manage, sleep.  Workers are woken up only while
2227         * holding gcwq->lock or from local cpu, so setting the
2228         * current state before releasing gcwq->lock is enough to
2229         * prevent losing any event.
2230         */
2231        worker_enter_idle(worker);
2232        __set_current_state(TASK_INTERRUPTIBLE);
2233        spin_unlock_irq(&gcwq->lock);
2234        schedule();
2235        goto woke_up;
2236}
2237
2238/**
2239 * rescuer_thread - the rescuer thread function
2240 * @__wq: the associated workqueue
2241 *
2242 * Workqueue rescuer thread function.  There's one rescuer for each
2243 * workqueue which has WQ_RESCUER set.
2244 *
2245 * Regular work processing on a gcwq may block trying to create a new
2246 * worker, which uses a GFP_KERNEL allocation and thus has a slight chance
2247 * of developing into a deadlock if some works currently on the same queue
2248 * need to be processed to satisfy that GFP_KERNEL allocation.  This is
2249 * the problem the rescuer solves.
2250 *
2251 * When such condition is possible, the gcwq summons rescuers of all
2252 * workqueues which have works queued on the gcwq and let them process
2253 * those works so that forward progress can be guaranteed.
2254 *
2255 * This should happen rarely.
2256 */
2257static int rescuer_thread(void *__wq)
2258{
2259        struct workqueue_struct *wq = __wq;
2260        struct worker *rescuer = wq->rescuer;
2261        struct list_head *scheduled = &rescuer->scheduled;
2262        bool is_unbound = wq->flags & WQ_UNBOUND;
2263        unsigned int cpu;
2264
2265        set_user_nice(current, RESCUER_NICE_LEVEL);
2266repeat:
2267        set_current_state(TASK_INTERRUPTIBLE);
2268
2269        if (kthread_should_stop())
2270                return 0;
2271
2272        /*
2273         * See whether any cpu is asking for help.  Unbound
2274         * workqueues use cpu 0 in mayday_mask for WORK_CPU_UNBOUND.
2275         */
2276        for_each_mayday_cpu(cpu, wq->mayday_mask) {
2277                unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
2278                struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
2279                struct worker_pool *pool = cwq->pool;
2280                struct global_cwq *gcwq = pool->gcwq;
2281                struct work_struct *work, *n;
2282
2283                __set_current_state(TASK_RUNNING);
2284                mayday_clear_cpu(cpu, wq->mayday_mask);
2285
2286                /* migrate to the target cpu if possible */
2287                rescuer->pool = pool;
2288                worker_maybe_bind_and_lock(rescuer);
2289
2290                /*
2291                 * Slurp in all works issued via this workqueue and
2292                 * process'em.
2293                 */
2294                BUG_ON(!list_empty(&rescuer->scheduled));
2295                list_for_each_entry_safe(work, n, &pool->worklist, entry)
2296                        if (get_work_cwq(work) == cwq)
2297                                move_linked_works(work, scheduled, &n);
2298
2299                process_scheduled_works(rescuer);
2300
2301                /*
2302                 * Leave this gcwq.  If keep_working() is %true, notify a
2303                 * regular worker; otherwise, we end up with 0 concurrency
2304                 * and stalling the execution.
2305                 */
2306                if (keep_working(pool))
2307                        wake_up_worker(pool);
2308
2309                spin_unlock_irq(&gcwq->lock);
2310        }
2311
2312        schedule();
2313        goto repeat;
2314}
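/*
 * Editorial note with a sketch (not part of workqueue.c): a rescuer is
 * attached when a workqueue is allocated with WQ_MEM_RECLAIM, which any
 * workqueue used on the memory reclaim path is expected to pass.  A
 * hypothetical storage driver might do:
 *
 *	struct workqueue_struct *my_io_wq;
 *
 *	my_io_wq = alloc_workqueue("my_io", WQ_MEM_RECLAIM, 1);
 *	if (!my_io_wq)
 *		return -ENOMEM;
 *
 * Work items queued on my_io_wq are then guaranteed forward progress by
 * rescuer_thread() above even when new workers can't be created because
 * the system is deep in reclaim.
 */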
2315
2316struct wq_barrier {
2317        struct work_struct      work;
2318        struct completion       done;
2319};
2320
2321static void wq_barrier_func(struct work_struct *work)
2322{
2323        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2324        complete(&barr->done);
2325}
2326
2327/**
2328 * insert_wq_barrier - insert a barrier work
2329 * @cwq: cwq to insert barrier into
2330 * @barr: wq_barrier to insert
2331 * @target: target work to attach @barr to
2332 * @worker: worker currently executing @target, NULL if @target is not executing
2333 *
2334 * @barr is linked to @target such that @barr is completed only after
2335 * @target finishes execution.  Please note that the ordering
2336 * guarantee is observed only with respect to @target and on the local
2337 * cpu.
2338 *
2339 * Currently, a queued barrier can't be canceled.  This is because
2340 * try_to_grab_pending() can't determine whether the work to be
2341 * grabbed is at the head of the queue and thus can't clear LINKED
2342 * flag of the previous work while there must be a valid next work
2343 * after a work with LINKED flag set.
2344 *
2345 * Note that when @worker is non-NULL, @target may be modified
2346 * underneath us, so we can't reliably determine cwq from @target.
2347 *
2348 * CONTEXT:
2349 * spin_lock_irq(gcwq->lock).
2350 */
2351static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2352                              struct wq_barrier *barr,
2353                              struct work_struct *target, struct worker *worker)
2354{
2355        struct list_head *head;
2356        unsigned int linked = 0;
2357
2358        /*
2359         * debugobject calls are safe here even with gcwq->lock locked
2360         * as we know for sure that this will not trigger any of the
2361         * checks and call back into the fixup functions where we
2362         * might deadlock.
2363         */
2364        INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2365        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2366        init_completion(&barr->done);
2367
2368        /*
2369         * If @target is currently being executed, schedule the
2370         * barrier to the worker; otherwise, put it after @target.
2371         */
2372        if (worker)
2373                head = worker->scheduled.next;
2374        else {
2375                unsigned long *bits = work_data_bits(target);
2376
2377                head = target->entry.next;
2378                /* there can already be other linked works, inherit and set */
2379                linked = *bits & WORK_STRUCT_LINKED;
2380                __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2381        }
2382
2383        debug_work_activate(&barr->work);
2384        insert_work(cwq, &barr->work, head,
2385                    work_color_to_flags(WORK_NO_COLOR) | linked);
2386}
2387
2388/**
2389 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
2390 * @wq: workqueue being flushed
2391 * @flush_color: new flush color, < 0 for no-op
2392 * @work_color: new work color, < 0 for no-op
2393 *
2394 * Prepare cwqs for workqueue flushing.
2395 *
2396 * If @flush_color is non-negative, flush_color on all cwqs should be
2397 * -1.  If no cwq has in-flight commands at the specified color, all
2398 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
2399 * has in flight commands, its cwq->flush_color is set to
2400 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
2401 * wakeup logic is armed and %true is returned.
2402 *
2403 * The caller should have initialized @wq->first_flusher prior to
2404 * calling this function with non-negative @flush_color.  If
2405 * @flush_color is negative, no flush color update is done and %false
2406 * is returned.
2407 *
2408 * If @work_color is non-negative, all cwqs should have the same
2409 * work_color which is previous to @work_color and all will be
2410 * advanced to @work_color.
2411 *
2412 * CONTEXT:
2413 * mutex_lock(wq->flush_mutex).
2414 *
2415 * RETURNS:
2416 * %true if @flush_color >= 0 and there's something to flush.  %false
2417 * otherwise.
2418 */
2419static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
2420                                      int flush_color, int work_color)
2421{
2422        bool wait = false;
2423        unsigned int cpu;
2424
2425        if (flush_color >= 0) {
2426                BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
2427                atomic_set(&wq->nr_cwqs_to_flush, 1);
2428        }
2429
2430        for_each_cwq_cpu(cpu, wq) {
2431                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2432                struct global_cwq *gcwq = cwq->pool->gcwq;
2433
2434                spin_lock_irq(&gcwq->lock);
2435
2436                if (flush_color >= 0) {
2437                        BUG_ON(cwq->flush_color != -1);
2438
2439                        if (cwq->nr_in_flight[flush_color]) {
2440                                cwq->flush_color = flush_color;
2441                                atomic_inc(&wq->nr_cwqs_to_flush);
2442                                wait = true;
2443                        }
2444                }
2445
2446                if (work_color >= 0) {
2447                        BUG_ON(work_color != work_next_color(cwq->work_color));
2448                        cwq->work_color = work_color;
2449                }
2450
2451                spin_unlock_irq(&gcwq->lock);
2452        }
2453
2454        if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
2455                complete(&wq->first_flusher->done);
2456
2457        return wait;
2458}
2459
2460/**
2461 * flush_workqueue - ensure that any scheduled work has run to completion.
2462 * @wq: workqueue to flush
2463 *
2464 * Forces execution of the workqueue and blocks until its completion.
2465 * This is typically used in driver shutdown handlers.
2466 *
2467 * We sleep until all works which were queued on entry have been handled,
2468 * but we are not livelocked by new incoming ones.
2469 */
2470void flush_workqueue(struct workqueue_struct *wq)
2471{
2472        struct wq_flusher this_flusher = {
2473                .list = LIST_HEAD_INIT(this_flusher.list),
2474                .flush_color = -1,
2475                .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2476        };
2477        int next_color;
2478
2479        lock_map_acquire(&wq->lockdep_map);
2480        lock_map_release(&wq->lockdep_map);
2481
2482        mutex_lock(&wq->flush_mutex);
2483
2484        /*
2485         * Start-to-wait phase
2486         */
2487        next_color = work_next_color(wq->work_color);
2488
2489        if (next_color != wq->flush_color) {
2490                /*
2491                 * Color space is not full.  The current work_color
2492                 * becomes our flush_color and work_color is advanced
2493                 * by one.
2494                 */
2495                BUG_ON(!list_empty(&wq->flusher_overflow));
2496                this_flusher.flush_color = wq->work_color;
2497                wq->work_color = next_color;
2498
2499                if (!wq->first_flusher) {
2500                        /* no flush in progress, become the first flusher */
2501                        BUG_ON(wq->flush_color != this_flusher.flush_color);
2502
2503                        wq->first_flusher = &this_flusher;
2504
2505                        if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2506                                                       wq->work_color)) {
2507                                /* nothing to flush, done */
2508                                wq->flush_color = next_color;
2509                                wq->first_flusher = NULL;
2510                                goto out_unlock;
2511                        }
2512                } else {
2513                        /* wait in queue */
2514                        BUG_ON(wq->flush_color == this_flusher.flush_color);
2515                        list_add_tail(&this_flusher.list, &wq->flusher_queue);
2516                        flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2517                }
2518        } else {
2519                /*
2520                 * Oops, color space is full, wait on overflow queue.
2521                 * The next flush completion will assign us
2522                 * flush_color and transfer to flusher_queue.
2523                 */
2524                list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2525        }
2526
2527        mutex_unlock(&wq->flush_mutex);
2528
2529        wait_for_completion(&this_flusher.done);
2530
2531        /*
2532         * Wake-up-and-cascade phase
2533         *
2534         * First flushers are responsible for cascading flushes and
2535         * handling overflow.  Non-first flushers can simply return.
2536         */
2537        if (wq->first_flusher != &this_flusher)
2538                return;
2539
2540        mutex_lock(&wq->flush_mutex);
2541
2542        /* we might have raced, check again with mutex held */
2543        if (wq->first_flusher != &this_flusher)
2544                goto out_unlock;
2545
2546        wq->first_flusher = NULL;
2547
2548        BUG_ON(!list_empty(&this_flusher.list));
2549        BUG_ON(wq->flush_color != this_flusher.flush_color);
2550
2551        while (true) {
2552                struct wq_flusher *next, *tmp;
2553
2554                /* complete all the flushers sharing the current flush color */
2555                list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2556                        if (next->flush_color != wq->flush_color)
2557                                break;
2558                        list_del_init(&next->list);
2559                        complete(&next->done);
2560                }
2561
2562                BUG_ON(!list_empty(&wq->flusher_overflow) &&
2563                       wq->flush_color != work_next_color(wq->work_color));
2564
2565                /* this flush_color is finished, advance by one */
2566                wq->flush_color = work_next_color(wq->flush_color);
2567
2568                /* one color has been freed, handle overflow queue */
2569                if (!list_empty(&wq->flusher_overflow)) {
2570                        /*
2571                         * Assign the same color to all overflowed
2572                         * flushers, advance work_color and append to
2573                         * flusher_queue.  This is the start-to-wait
2574                         * phase for these overflowed flushers.
2575                         */
2576                        list_for_each_entry(tmp, &wq->flusher_overflow, list)
2577                                tmp->flush_color = wq->work_color;
2578
2579                        wq->work_color = work_next_color(wq->work_color);
2580
2581                        list_splice_tail_init(&wq->flusher_overflow,
2582                                              &wq->flusher_queue);
2583                        flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2584                }
2585
2586                if (list_empty(&wq->flusher_queue)) {
2587                        BUG_ON(wq->flush_color != wq->work_color);
2588                        break;
2589                }
2590
2591                /*
2592                 * Need to flush more colors.  Make the next flusher
2593                 * the new first flusher and arm cwqs.
2594                 */
2595                BUG_ON(wq->flush_color == wq->work_color);
2596                BUG_ON(wq->flush_color != next->flush_color);
2597
2598                list_del_init(&next->list);
2599                wq->first_flusher = next;
2600
2601                if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2602                        break;
2603
2604                /*
2605                 * Meh... this color is already done, clear first
2606                 * flusher and repeat cascading.
2607                 */
2608                wq->first_flusher = NULL;
2609        }
2610
2611out_unlock:
2612        mutex_unlock(&wq->flush_mutex);
2613}
2614EXPORT_SYMBOL_GPL(flush_workqueue);
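/*
 * Usage sketch (editorial, not part of workqueue.c): the typical caller is
 * a driver flushing its private workqueue on teardown.  my_wq and
 * my_remove() are hypothetical names.
 *
 *	static struct workqueue_struct *my_wq;
 *
 *	static void my_remove(void)
 *	{
 *		flush_workqueue(my_wq);
 *		destroy_workqueue(my_wq);
 *	}
 *
 * Every work item queued on my_wq before flush_workqueue() was called has
 * finished by the time it returns; items queued afterwards are not waited
 * for.
 */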
2615
2616/**
2617 * drain_workqueue - drain a workqueue
2618 * @wq: workqueue to drain
2619 *
2620 * Wait until the workqueue becomes empty.  While draining is in progress,
2621 * only chain queueing is allowed.  IOW, only currently pending or running
2622 * work items on @wq can queue further work items on it.  @wq is flushed
2623 * repeatedly until it becomes empty.  The number of flushes is determined
2624 * by the depth of chaining and should be relatively short.  Whine if it
2625 * takes too long.
2626 */
2627void drain_workqueue(struct workqueue_struct *wq)
2628{
2629        unsigned int flush_cnt = 0;
2630        unsigned int cpu;
2631
2632        /*
2633         * __queue_work() needs to test whether there are drainers; it is much
2634         * hotter than drain_workqueue() and already looks at @wq->flags.  Use
2635         * WQ_DRAINING so that the queueing path doesn't have to check nr_drainers.
2636         */
2637        spin_lock(&workqueue_lock);
2638        if (!wq->nr_drainers++)
2639                wq->flags |= WQ_DRAINING;
2640        spin_unlock(&workqueue_lock);
2641reflush:
2642        flush_workqueue(wq);
2643
2644        for_each_cwq_cpu(cpu, wq) {
2645                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2646                bool drained;
2647
2648                spin_lock_irq(&cwq->pool->gcwq->lock);
2649                drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
2650                spin_unlock_irq(&cwq->pool->gcwq->lock);
2651
2652                if (drained)
2653                        continue;
2654
2655                if (++flush_cnt == 10 ||
2656                    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2657                        pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
2658                                   wq->name, flush_cnt);
2659                goto reflush;
2660        }
2661
2662        spin_lock(&workqueue_lock);
2663        if (!--wq->nr_drainers)
2664                wq->flags &= ~WQ_DRAINING;
2665        spin_unlock(&workqueue_lock);
2666}
2667EXPORT_SYMBOL_GPL(drain_workqueue);
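/*
 * Usage sketch (editorial, not part of workqueue.c): drain_workqueue() is
 * meant for queues whose work items may requeue themselves, where a single
 * flush_workqueue() wouldn't be enough.  my_wq, my_requeueing_fn and
 * more_to_do() are hypothetical.
 *
 *	static void my_requeueing_fn(struct work_struct *work)
 *	{
 *		if (more_to_do())
 *			queue_work(my_wq, work);
 *	}
 *
 *	static void my_quiesce(void)
 *	{
 *		drain_workqueue(my_wq);
 *	}
 *
 * While the drain is in progress only such chain queueing is allowed;
 * my_quiesce() returns once my_wq is empty.
 */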
2668
2669static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2670                             bool wait_executing)
2671{
2672        struct worker *worker = NULL;
2673        struct global_cwq *gcwq;
2674        struct cpu_workqueue_struct *cwq;
2675
2676        might_sleep();
2677        gcwq = get_work_gcwq(work);
2678        if (!gcwq)
2679                return false;
2680
2681        spin_lock_irq(&gcwq->lock);
2682        if (!list_empty(&work->entry)) {
2683                /*
2684                 * See the comment near try_to_grab_pending()->smp_rmb().
2685                 * If it was re-queued to a different gcwq under us, we
2686                 * are not going to wait.
2687                 */
2688                smp_rmb();
2689                cwq = get_work_cwq(work);
2690                if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
2691                        goto already_gone;
2692        } else if (wait_executing) {
2693                worker = find_worker_executing_work(gcwq, work);
2694                if (!worker)
2695                        goto already_gone;
2696                cwq = worker->current_cwq;
2697        } else
2698                goto already_gone;
2699
2700        insert_wq_barrier(cwq, barr, work, worker);
2701        spin_unlock_irq(&gcwq->lock);
2702
2703        /*
2704         * If @max_active is 1 or rescuer is in use, flushing another work
2705         * item on the same workqueue may lead to deadlock.  Make sure the
2706         * flusher is not running on the same workqueue by verifying write
2707         * access.
2708         */
2709        if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
2710                lock_map_acquire(&cwq->wq->lockdep_map);
2711        else
2712                lock_map_acquire_read(&cwq->wq->lockdep_map);
2713        lock_map_release(&cwq->wq->lockdep_map);
2714
2715        return true;
2716already_gone:
2717        spin_unlock_irq(&gcwq->lock);
2718        return false;
2719}
2720
2721/**
2722 * flush_work - wait for a work to finish executing the last queueing instance
2723 * @work: the work to flush
2724 *
2725 * Wait until @work has finished execution.  This function considers
2726 * only the last queueing instance of @work.  If @work has been
2727 * enqueued across different CPUs on a non-reentrant workqueue or on
2728 * multiple workqueues, @work might still be executing on return on
2729 * some of the CPUs from earlier queueing.
2730 *
2731 * If @work was queued only on a non-reentrant, ordered or unbound
2732 * workqueue, @work is guaranteed to be idle on return if it hasn't
2733 * been requeued since flush started.
2734 *
2735 * RETURNS:
2736 * %true if flush_work() waited for the work to finish execution,
2737 * %false if it was already idle.
2738 */
2739bool flush_work(struct work_struct *work)
2740{
2741        struct wq_barrier barr;
2742
2743        lock_map_acquire(&work->lockdep_map);
2744        lock_map_release(&work->lockdep_map);
2745
2746        if (start_flush_work(work, &barr, true)) {
2747                wait_for_completion(&barr.done);
2748                destroy_work_on_stack(&barr.work);
2749                return true;
2750        } else
2751                return false;
2752}
2753EXPORT_SYMBOL_GPL(flush_work);
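/*
 * Usage sketch (editorial, not part of workqueue.c): waiting for the last
 * queueing of one specific work item, typically before releasing the
 * object embedding it.  struct my_dev and its irq_work are hypothetical.
 *
 *	static void my_dev_release(struct my_dev *dev)
 *	{
 *		flush_work(&dev->irq_work);
 *		kfree(dev);
 *	}
 *
 * As documented above, only the last queueing instance is waited for; if
 * the work item can still be requeued concurrently, cancel_work_sync()
 * below is the safer call to make before freeing.
 */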
2754
2755static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2756{
2757        struct wq_barrier barr;
2758        struct worker *worker;
2759
2760        spin_lock_irq(&gcwq->lock);
2761
2762        worker = find_worker_executing_work(gcwq, work);
2763        if (unlikely(worker))
2764                insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2765
2766        spin_unlock_irq(&gcwq->lock);
2767
2768        if (unlikely(worker)) {
2769                wait_for_completion(&barr.done);
2770                destroy_work_on_stack(&barr.work);
2771                return true;
2772        } else
2773                return false;
2774}
2775
2776static bool wait_on_work(struct work_struct *work)
2777{
2778        bool ret = false;
2779        int cpu;
2780
2781        might_sleep();
2782
2783        lock_map_acquire(&work->lockdep_map);
2784        lock_map_release(&work->lockdep_map);
2785
2786        for_each_gcwq_cpu(cpu)
2787                ret |= wait_on_cpu_work(get_gcwq(cpu), work);
2788        return ret;
2789}
2790
2791/**
2792 * flush_work_sync - wait until a work has finished execution
2793 * @work: the work to flush
2794 *
2795 * Wait until @work has finished execution.  On return, it's
2796 * guaranteed that all queueing instances of @work which happened
2797 * before this function is called are finished.  In other words, if
2798 * @work hasn't been requeued since this function was called, @work is
2799 * guaranteed to be idle on return.
2800 *
2801 * RETURNS:
2802 * %true if flush_work_sync() waited for the work to finish execution,
2803 * %false if it was already idle.
2804 */
2805bool flush_work_sync(struct work_struct *work)
2806{
2807        struct wq_barrier barr;
2808        bool pending, waited;
2809
2810        /* we'll wait for executions separately, queue barr only if pending */
2811        pending = start_flush_work(work, &barr, false);
2812
2813        /* wait for executions to finish */
2814        waited = wait_on_work(work);
2815
2816        /* wait for the pending one */
2817        if (pending) {
2818                wait_for_completion(&barr.done);
2819                destroy_work_on_stack(&barr.work);
2820        }
2821
2822        return pending || waited;
2823}
2824EXPORT_SYMBOL_GPL(flush_work_sync);
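/*
 * Editorial sketch (not part of workqueue.c): flush_work_sync() is the
 * variant to use when the caller can't tell where @work was last queued,
 * since it also waits for earlier queueing instances that may still be
 * executing elsewhere.  stats_work and read_accumulated_stats() are
 * hypothetical.
 *
 *	static void stats_snapshot(void)
 *	{
 *		flush_work_sync(&stats_work);
 *		read_accumulated_stats();
 *	}
 */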
2825
2826/*
2827 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
2828 * so this work can't be re-armed in any way.
2829 */
2830static int try_to_grab_pending(struct work_struct *work)
2831{
2832        struct global_cwq *gcwq;
2833        int ret = -1;
2834
2835        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
2836                return 0;
2837
2838        /*
2839         * The queueing is in progress, or it is already queued. Try to
2840         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2841         */
2842        gcwq = get_work_gcwq(work);
2843        if (!gcwq)
2844                return ret;
2845
2846        spin_lock_irq(&gcwq->lock);
2847        if (!list_empty(&work->entry)) {
2848                /*
2849                 * This work is queued, but perhaps we locked the wrong gcwq.
2850                 * In that case we must see the new value after rmb(), see
2851                 * insert_work()->wmb().
2852                 */
2853                smp_rmb();
2854                if (gcwq == get_work_gcwq(work)) {
2855                        debug_work_deactivate(work);
2856
2857                        /*
2858                         * A delayed work item cannot be grabbed directly
2859                         * because it might have linked NO_COLOR work items
2860                         * which, if left on the delayed_list, will confuse
2861                         * cwq->nr_active management later on and cause
2862                         * stall.  Make sure the work item is activated
2863                         * before grabbing.
2864                         */
2865                        if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
2866                                cwq_activate_delayed_work(work);
2867
2868                        list_del_init(&work->entry);
2869                        cwq_dec_nr_in_flight(get_work_cwq(work),
2870                                get_work_color(work),
2871                                *work_data_bits(work) & WORK_STRUCT_DELAYED);
2872                        ret = 1;
2873                }
2874        }
2875        spin_unlock_irq(&gcwq->lock);
2876
2877        return ret;
2878}
2879
2880static bool __cancel_work_timer(struct work_struct *work,
2881                                struct timer_list* timer)
2882{
2883        int ret;
2884
2885        do {
2886                ret = (timer && likely(del_timer(timer)));
2887                if (!ret)
2888                        ret = try_to_grab_pending(work);
2889                wait_on_work(work);
2890        } while (unlikely(ret < 0));
2891
2892        clear_work_data(work);
2893        return ret;
2894}
2895
2896/**
2897 * cancel_work_sync - cancel a work and wait for it to finish
2898 * @work: the work to cancel
2899 *
2900 * Cancel @work and wait for its execution to finish.  This function
2901 * can be used even if the work re-queues itself or migrates to
2902 * another workqueue.  On return from this function, @work is
2903 * guaranteed to be not pending or executing on any CPU.
2904 *
2905 * cancel_work_sync(&delayed_work->work) must not be used for
2906 * delayed_work's.  Use cancel_delayed_work_sync() instead.
2907 *
2908 * The caller must ensure that the workqueue on which @work was last
2909 * queued can't be destroyed before this function returns.
2910 *
2911 * RETURNS:
2912 * %true if @work was pending, %false otherwise.
2913 */
2914bool cancel_work_sync(struct work_struct *work)
2915{
2916        return __cancel_work_timer(work, NULL);
2917}
2918EXPORT_SYMBOL_GPL(cancel_work_sync);
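/*
 * Usage sketch (editorial, not part of workqueue.c): the canonical caller
 * is a remove/close path that must know a work item is neither pending nor
 * running before its data disappears.  The names are hypothetical.
 *
 *	static void my_close(struct my_priv *priv)
 *	{
 *		cancel_work_sync(&priv->refill_work);
 *		free_rx_buffers(priv);
 *	}
 *
 * Even if refill_work's function requeues itself, cancel_work_sync()
 * returns only once the work item is idle, as documented above.
 */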
2919
2920/**
2921 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2922 * @dwork: the delayed work to flush
2923 *
2924 * Delayed timer is cancelled and the pending work is queued for
2925 * immediate execution.  Like flush_work(), this function only
2926 * considers the last queueing instance of @dwork.
2927 *
2928 * RETURNS:
2929 * %true if flush_work() waited for the work to finish execution,
2930 * %false if it was already idle.
2931 */
2932bool flush_delayed_work(struct delayed_work *dwork)
2933{
2934        if (del_timer_sync(&dwork->timer))
2935                __queue_work(raw_smp_processor_id(),
2936                             get_work_cwq(&dwork->work)->wq, &dwork->work);
2937        return flush_work(&dwork->work);
2938}
2939EXPORT_SYMBOL(flush_delayed_work);
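
/*
 * Example (illustrative sketch, hypothetical example_* names): make the
 * most recent queueing of a delayed work item run right away and wait
 * for it, e.g. when userspace asks for up-to-date statistics.
 */
static void example_stats_fn(struct work_struct *work)
{
        /* refresh some cached statistics */
}

static DECLARE_DELAYED_WORK(example_stats_dwork, example_stats_fn);

static void __maybe_unused example_refresh_stats_now(void)
{
        /* normally rearmed every ten seconds ... */
        schedule_delayed_work(&example_stats_dwork, 10 * HZ);

        /* ... but cancel the timer and run it immediately instead */
        flush_delayed_work(&example_stats_dwork);
}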
2940
2941/**
2942 * flush_delayed_work_sync - wait for a dwork to finish
2943 * @dwork: the delayed work to flush
2944 *
2945 * Delayed timer is cancelled and the pending work is queued for
2946 * execution immediately.  Other than timer handling, its behavior
2947 * is identical to flush_work_sync().
2948 *
2949 * RETURNS:
2950 * %true if flush_work_sync() waited for the work to finish execution,
2951 * %false if it was already idle.
2952 */
2953bool flush_delayed_work_sync(struct delayed_work *dwork)
2954{
2955        if (del_timer_sync(&dwork->timer))
2956                __queue_work(raw_smp_processor_id(),
2957                             get_work_cwq(&dwork->work)->wq, &dwork->work);
2958        return flush_work_sync(&dwork->work);
2959}
2960EXPORT_SYMBOL(flush_delayed_work_sync);
2961
2962/**
2963 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
2964 * @dwork: the delayed work to cancel
2965 *
2966 * This is cancel_work_sync() for delayed works.
2967 *
2968 * RETURNS:
2969 * %true if @dwork was pending, %false otherwise.
2970 */
2971bool cancel_delayed_work_sync(struct delayed_work *dwork)
2972{
2973        return __cancel_work_timer(&dwork->work, &dwork->timer);
2974}
2975EXPORT_SYMBOL(cancel_delayed_work_sync);
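
/*
 * Example (illustrative sketch, hypothetical example_* names): a device
 * with a periodic polling dwork.  It is armed on probe and must be
 * stopped with cancel_delayed_work_sync() on remove so that neither the
 * timer nor the work function can run once the device is gone.
 */
struct example_device {
        struct delayed_work     poll_dwork;
};

static void example_poll_fn(struct work_struct *work)
{
        struct example_device *edev =
                container_of(work, struct example_device, poll_dwork.work);

        /* poll the hardware, then rearm ourselves */
        schedule_delayed_work(&edev->poll_dwork, HZ);
}

static void __maybe_unused example_probe(struct example_device *edev)
{
        INIT_DELAYED_WORK(&edev->poll_dwork, example_poll_fn);
        schedule_delayed_work(&edev->poll_dwork, HZ);
}

static void __maybe_unused example_remove(struct example_device *edev)
{
        /* also copes with the self-rearming in example_poll_fn() */
        cancel_delayed_work_sync(&edev->poll_dwork);
}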
2976
2977/**
2978 * schedule_work - put work task in global workqueue
2979 * @work: job to be done
2980 *
2981 * Returns zero if @work was already on the kernel-global workqueue and
2982 * non-zero otherwise.
2983 *
2984 * This puts a job in the kernel-global workqueue if it was not already
2985 * queued and leaves it in the same position on the kernel-global
2986 * workqueue otherwise.
2987 */
2988int schedule_work(struct work_struct *work)
2989{
2990        return queue_work(system_wq, work);
2991}
2992EXPORT_SYMBOL(schedule_work);
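
/*
 * Example (illustrative sketch, hypothetical example_* names): deferring
 * the heavy part of interrupt handling to process context by putting a
 * work item on the kernel-global workqueue from the hard IRQ handler.
 * ctx->bh_work is assumed to have been set up with
 * INIT_WORK(&ctx->bh_work, example_bh_fn) beforehand.
 */
struct example_irq_ctx {
        struct work_struct      bh_work;
};

static void example_bh_fn(struct work_struct *work)
{
        struct example_irq_ctx *ctx =
                container_of(work, struct example_irq_ctx, bh_work);

        /* process context: may sleep, allocate memory, take mutexes */
        pr_debug("handling deferred work for %p\n", ctx);
}

static void __maybe_unused example_hardirq(struct example_irq_ctx *ctx)
{
        /* cheap and IRQ-safe; example_bh_fn() runs later in process context */
        schedule_work(&ctx->bh_work);
}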
2993
2994/**
2995 * schedule_work_on - put work task on a specific cpu
2996 * @cpu: cpu to put the work task on
2997 * @work: job to be done
2998 *
2999 * This puts a job on a specific cpu.
3000 */
3001int schedule_work_on(int cpu, struct work_struct *work)
3002{
3003        return queue_work_on(cpu, system_wq, work);
3004}
3005EXPORT_SYMBOL(schedule_work_on);
3006
3007/**
3008 * schedule_delayed_work - put work task in global workqueue after delay
3009 * @dwork: job to be done
3010 * @delay: number of jiffies to wait or 0 for immediate execution
3011 *
3012 * After waiting for a given time this puts a job in the kernel-global
3013 * workqueue.
3014 */
3015int schedule_delayed_work(struct delayed_work *dwork,
3016                                        unsigned long delay)
3017{
3018        return queue_delayed_work(system_wq, dwork, delay);
3019}
3020EXPORT_SYMBOL(schedule_delayed_work);
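
/*
 * Example (illustrative sketch, hypothetical example_* names): retrying
 * an operation later instead of busy-waiting, by re-queueing the same
 * dwork with a delay from inside its own work function.
 */
static bool example_try_operation(void)
{
        /* hypothetical: succeeds once some resource becomes available */
        return true;
}

static void example_retry_fn(struct work_struct *work)
{
        if (!example_try_operation())
                schedule_delayed_work(to_delayed_work(work), HZ / 2);
}

static DECLARE_DELAYED_WORK(example_retry_dwork, example_retry_fn);

static void __maybe_unused example_start(void)
{
        /* a delay of 0 asks for execution as soon as a worker is free */
        schedule_delayed_work(&example_retry_dwork, 0);
}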
3021
3022/**
3023 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
3024 * @cpu: cpu to use
3025 * @dwork: job to be done
3026 * @delay: number of jiffies to wait
3027 *
3028 * After waiting for a given time this puts a job in the kernel-global
3029 * workqueue on the specified CPU.
3030 */
3031int schedule_delayed_work_on(int cpu,
3032                        struct delayed_work *dwork, unsigned long delay)
3033{
3034        return queue_delayed_work_on(cpu, system_wq, dwork, delay);
3035}
3036EXPORT_SYMBOL(schedule_delayed_work_on);
3037
3038/**
3039 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3040 * @func: the function to call
3041 *
3042 * schedule_on_each_cpu() executes @func on each online CPU using the
3043 * system workqueue and blocks until all CPUs have completed.
3044 * schedule_on_each_cpu() is very slow.
3045 *
3046 * RETURNS:
3047 * 0 on success, -errno on failure.
3048 */
3049int schedule_on_each_cpu(work_func_t func)
3050{
3051        int cpu;
3052        struct work_struct __percpu *works;
3053
3054        works = alloc_percpu(struct work_struct);
3055        if (!works)
3056                return -ENOMEM;
3057
3058        get_online_cpus();
3059
3060        for_each_online_cpu(cpu) {
3061                struct work_struct *work = per_cpu_ptr(works, cpu);
3062
3063                INIT_WORK(work, func);
3064                schedule_work_on(cpu, work);
3065        }
3066
3067        for_each_online_cpu(cpu)
3068                flush_work(per_cpu_ptr(works, cpu));
3069
3070        put_online_cpus();
3071        free_percpu(works);
3072        return 0;
3073}
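
/*
 * Example (illustrative sketch, hypothetical example_* names): resetting
 * per-CPU counters with schedule_on_each_cpu().  The callback runs on
 * every online CPU in process context and the caller blocks until all
 * of them have finished.
 */
static DEFINE_PER_CPU(unsigned long, example_pcpu_count);

static void example_reset_fn(struct work_struct *work)
{
        /* executes on the CPU it was queued on */
        this_cpu_write(example_pcpu_count, 0);
}

static int __maybe_unused example_reset_all(void)
{
        return schedule_on_each_cpu(example_reset_fn);  /* may be slow */
}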
3074
3075/**
3076 * flush_scheduled_work - ensure that any scheduled work has run to completion.
3077 *
3078 * Forces execution of the kernel-global workqueue and blocks until its
3079 * completion.
3080 *
3081 * Think twice before calling this function!  It's very easy to get into
3082 * trouble if you don't take great care.  Either of the following situations
3083 * will lead to deadlock:
3084 *
3085 *      One of the work items currently on the workqueue needs to acquire
3086 *      a lock held by your code or its caller.
3087 *
3088 *      Your code is running in the context of a work routine.
3089 *
3090 * They will be detected by lockdep when they occur, but the first might not
3091 * occur very often.  It depends on what work items are on the workqueue and
3092 * what locks they need, which you have no control over.
3093 *
3094 * In most situations flushing the entire workqueue is overkill; you merely
3095 * need to know that a particular work item isn't queued and isn't running.
3096 * In such cases you should use cancel_delayed_work_sync() or
3097 * cancel_work_sync() instead.
3098 */
3099void flush_scheduled_work(void)
3100{
3101        flush_workqueue(system_wq);
3102}
3103EXPORT_SYMBOL(flush_scheduled_work);
3104
3105/**
3106 * execute_in_process_context - reliably execute the routine with user context
3107 * @fn:         the function to execute
3108 * @ew:         guaranteed storage for the execute work structure (must
3109 *              be available when the work executes)
3110 *
3111 * Executes the function immediately if process context is available,
3112 * otherwise schedules the function for delayed execution.
3113 *
3114 * Returns:     0 - function was executed
3115 *              1 - function was scheduled for execution
3116 */
3117int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3118{
3119        if (!in_interrupt()) {
3120                fn(&ew->work);
3121                return 0;
3122        }
3123
3124        INIT_WORK(&ew->work, fn);
3125        schedule_work(&ew->work);
3126
3127        return 1;
3128}
3129EXPORT_SYMBOL_GPL(execute_in_process_context);
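
/*
 * Example (illustrative sketch, hypothetical example_* names): a release
 * routine that may be reached from either process or interrupt context.
 * execute_in_process_context() runs it inline when possible and defers
 * it to the kernel-global workqueue otherwise.  @ew must stay valid
 * until the callback has run, so it is embedded in the object itself.
 */
struct example_object {
        struct execute_work     ew;
        /* ... other fields ... */
};

static void example_release_fn(struct work_struct *work)
{
        struct example_object *obj =
                container_of(work, struct example_object, ew.work);

        kfree(obj);
}

static void __maybe_unused example_put(struct example_object *obj)
{
        execute_in_process_context(example_release_fn, &obj->ew);
}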
3130
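/*
 * keventd_up - has the kernel-global workqueue been set up yet?  Lets
 * early callers check whether schedule_work() can already be used.
 */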
3131int keventd_up(void)
3132{
3133        return system_wq != NULL;
3134}
3135
3136static int alloc_cwqs(struct workqueue_struct *wq)
3137{
3138        /*
3139         * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
3140         * Make sure that the alignment isn't lower than that of
3141         * unsigned long long.
3142         */
3143        const size_t size = sizeof(struct cpu_workqueue_struct);
3144        const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
3145                                   __alignof__(unsigned long long));
3146
3147        if (!(wq->flags & WQ_UNBOUND))
3148                wq->cpu_wq.pcpu = __alloc_percpu(size, align);
3149        else {
3150                void *ptr;
3151
3152                /*
3153                 * Allocate enough room to align cwq and put an extra
3154                 * pointer at the end pointing back to the originally
3155                 * allocated pointer which will be used for free.
3156                 */
3157                ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
3158                if (ptr) {
3159                        wq->cpu_wq.single = PTR_ALIGN(ptr, align);
3160                        *(void **)(wq->cpu_wq.single + 1) = ptr;
3161                }
3162        }
3163
3164        /* just in case, make sure it's actually aligned */
3165        BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
3166        return wq->cpu_wq.v ? 0 : -ENOMEM;
3167}
3168
3169static void free_cwqs(struct workqueue_struct *wq)
3170{
3171        if (!(wq->flags & WQ_UNBOUND))
3172                free_percpu(wq->cpu_wq.pcpu);
3173        else if (wq->cpu_wq.single) {
3174                /* the pointer to free is stored right after the cwq */
3175                kfree(*(void **)(wq->cpu_wq.single + 1));
3176        }
3177}
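
/*
 * Illustrative sketch (hypothetical example_* names) of the allocation
 * trick used by alloc_cwqs()/free_cwqs() above for the unbound case:
 * over-allocate, align the object with PTR_ALIGN() and stash the
 * original kzalloc() pointer right behind it so free can find it again.
 * @align must be a power of two.
 */
struct example_aligned {
        unsigned long data[4];
};

static struct example_aligned * __maybe_unused
example_alloc_aligned(size_t align)
{
        struct example_aligned *obj;
        void *ptr;

        ptr = kzalloc(sizeof(*obj) + align + sizeof(void *), GFP_KERNEL);
        if (!ptr)
                return NULL;

        obj = PTR_ALIGN(ptr, align);
        *(void **)(obj + 1) = ptr;      /* remember what to kfree() later */
        return obj;
}

static void __maybe_unused example_free_aligned(struct example_aligned *obj)
{
        if (obj)
                kfree(*(void **)(obj + 1));
}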
3178
3179static int wq_clamp_max_active(int max_active, unsigned int flags,
3180                               const char *name)
3181{
3182        int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
3183
3184        if (max_active < 1 || max_active > lim)
3185                printk(KERN_WARNING "workqueue: max_active %d requested for %s "
3186                       "is out of range, clamping between %d and %d\n",
3187                       max_active, name, 1, lim);
3188
3189        return clamp_val(max_active, 1, lim);
3190}
3191
3192struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3193                                               unsigned int flags,
3194                                               int max_active,
3195                                               struct lock_class_key *key,
3196                                               const char *lock_name, ...)
3197{
3198        va_list args, args1;
3199        struct workqueue_struct *wq;
3200        unsigned int cpu;
3201        size_t namelen;
3202
3203        /* determine namelen, allocate wq and format name */
3204        va_start(args, lock_name);
3205        va_copy(args1, args);
3206        namelen = vsnprintf(NULL, 0, fmt, args) + 1;
3207
3208        wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
3209        if (!wq)
3210                goto err;
3211
3212        vsnprintf(wq->name, namelen, fmt, args1);
3213        va_end(args);
3214        va_end(args1);
3215
3216        /*
3217         * Workqueues which may be used during memory reclaim should
3218         * have a rescuer to guarantee forward progress.
3219         */
3220        if (flags & WQ_MEM_RECLAIM)
3221                flags |= WQ_RESCUER;
3222
3223        max_active = max_active ?: WQ_DFL_ACTIVE;
3224        max_active = wq_clamp_max_active(max_active, flags, wq->name);
3225
3226        /* init wq */
3227        wq->flags = flags;
3228        wq->saved_max_active = max_active;
3229        mutex_init(&wq->flush_mutex);
3230        atomic_set(&wq->nr_cwqs_to_flush, 0);
3231        INIT_LIST_HEAD(&wq->flusher_queue);
3232        INIT_LIST_HEAD(&wq->flusher_overflow);
3233
3234        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
3235        INIT_LIST_HEAD(&wq->list);
3236
3237        if (alloc_cwqs(wq) < 0)
3238                goto err;
3239
3240        for_each_cwq_cpu(cpu, wq) {
3241                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3242                struct global_cwq *gcwq = get_gcwq(cpu);
3243                int pool_idx = (bool)(flags & WQ_HIGHPRI);
3244
3245                BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
3246                cwq->pool = &gcwq->pools[pool_idx];
3247                cwq->wq = wq;
3248                cwq->flush_color = -1;
3249                cwq->max_active = max_active;
3250                INIT_LIST_HEAD(&cwq->delayed_works);
3251        }
3252
3253        if (flags & WQ_RESCUER) {
3254                struct worker *rescuer;
3255
3256                if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
3257                        goto err;
3258
3259                wq->rescuer = rescuer = alloc_worker();
3260                if (!rescuer)
3261                        goto err;
3262
3263                rescuer->task = kthread_create(rescuer_thread, wq, "%s",
3264                                               wq->name);
3265                if (IS_ERR(rescuer->task))
3266                        goto err;
3267
3268                rescuer->task->flags |= PF_THREAD_BOUND;
3269                wake_up_process(rescuer->task);
3270        }
3271
3272        /*
3273         * workqueue_lock protects global freeze state and workqueues
3274         * list.  Grab it, set max_active accordingly and add the new
3275         * workqueue to workqueues list.
3276         */
3277        spin_lock(&workqueue_lock);
3278
3279        if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
3280                for_each_cwq_cpu(cpu, wq)
3281                        get_cwq(cpu, wq)->max_active = 0;
3282
3283        list_add(&wq->list, &workqueues);
3284
3285        spin_unlock(&workqueue_lock);
3286
3287        return wq;
3288err:
3289        if (wq) {
3290                free_cwqs(wq);
3291                free_mayday_mask(wq->mayday_mask);
3292                kfree(wq->rescuer);
3293                kfree(wq);
3294        }
3295        return NULL;
3296}
3297EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
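
/*
 * Example (illustrative sketch, hypothetical example_* names):
 * __alloc_workqueue_key() is normally reached through the
 * alloc_workqueue() wrapper.  A driver whose work items sit in the
 * memory reclaim path would pass WQ_MEM_RECLAIM so that a rescuer
 * guarantees forward progress, and destroy the queue on unload.
 */
static struct workqueue_struct *example_wq;

static int __maybe_unused example_create_wq(void)
{
        example_wq = alloc_workqueue("example_io", WQ_MEM_RECLAIM, 1);
        if (!example_wq)
                return -ENOMEM;
        return 0;
}

static void __maybe_unused example_destroy_wq(void)
{
        destroy_workqueue(example_wq);  /* drains pending work first */
}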
3298
3299/**
3300 * destroy_workqueue - safely terminate a workqueue
3301 * @wq: target workqueue
3302 *
3303 * Safely destroy a workqueue. All work currently pending will be done first.
3304 */
3305void destroy_workqueue(struct workqueue_struct *wq)
3306{
3307        unsigned int cpu;
3308
3309        /* drain it before proceeding with destruction */
3310        drain_workqueue(wq);
3311
3312        /*
3313         * wq list is used to freeze wq, remove from list after
3314         * flushing is complete in case freeze races us.
3315         */
3316        spin_lock(&workqueue_lock);
3317        list_del(&wq->list);
3318        spin_unlock(&workqueue_lock);
3319
3320        /* sanity check */
3321        for_each_cwq_cpu(cpu, wq) {
3322                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3323                int i;
3324
3325                for (i = 0; i < WORK_NR_COLORS; i++)
3326                        BUG_ON(cwq->nr_in_flight[i]);
3327                BUG_ON(cwq->nr_active);
3328                BUG_ON(!list_empty(&cwq->delayed_works));
3329        }
3330
3331        if (wq->flags & WQ_RESCUER) {
3332                kthread_stop(wq->rescuer->task);
3333                free_mayday_mask(wq->mayday_mask);
3334                kfree(wq->rescuer);
3335        }
3336
3337        free_cwqs(wq);
3338        kfree(wq);
3339}
3340EXPORT_SYMBOL_GPL(destroy_workqueue);
3341
3342/**
3343 * workqueue_set_max_active - adjust max_active of a workqueue
3344 * @wq: target workqueue
3345 * @max_active: new max_active value.
3346 *
3347 * Set max_active of @wq to @max_active.
3348 *
3349 * CONTEXT:
3350 * Don't call from IRQ context.
3351 */
3352void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3353{
3354        unsigned int cpu;
3355
3356        max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
3357
3358        spin_lock(&workqueue_lock);
3359
3360        wq->saved_max_active = max_active;
3361
3362        for_each_cwq_cpu(cpu, wq) {
3363                struct global_cwq *gcwq = get_gcwq(cpu);
3364
3365                spin_lock_irq(&gcwq->lock);
3366
3367                if (!(wq->flags & WQ_FREEZABLE) ||
3368                    !(gcwq->flags & GCWQ_FREEZING))
3369                        get_cwq(gcwq->cpu, wq)->max_active = max_active;
3370
3371                spin_unlock_irq(&gcwq->lock);
3372        }
3373
3374        spin_unlock(&workqueue_lock);
3375}
3376EXPORT_SYMBOL_GPL(workqueue_set_max_active);
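
/*
 * Example (illustrative sketch, hypothetical example_* names): throttling
 * a workqueue at run time, e.g. from a sysfs knob, by adjusting how many
 * of its work items may execute concurrently per CPU.
 */
static void __maybe_unused example_set_concurrency(struct workqueue_struct *wq,
                                                   bool low_power)
{
        /* clamped to [1, WQ_MAX_ACTIVE] by wq_clamp_max_active() */
        workqueue_set_max_active(wq, low_power ? 1 : WQ_DFL_ACTIVE);
}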
3377
3378/**
3379 * workqueue_congested - test whether a workqueue is congested
3380 * @cpu: CPU in question
3381 * @wq: target workqueue
3382 *
3383 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
3384 * no synchronization around this function and the test result is
3385 * unreliable and only useful as advisory hints or for debugging.
3386 *
3387 * RETURNS:
3388 * %true if congested, %false otherwise.
3389 */
3390bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
3391{
3392        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3393
3394        return !list_empty(&cwq->delayed_works);
3395}
3396EXPORT_SYMBOL_GPL(workqueue_congested);
3397
3398/**
3399 * work_cpu - return the last known associated cpu for @work
3400 * @work: the work of interest
3401 *
3402 * RETURNS:
3403 * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
3404 */
3405unsigned int work_cpu(struct work_struct *work)
3406{
3407        struct global_cwq *gcwq = get_work_gcwq(work);
3408
3409        return gcwq ? gcwq->cpu : WORK_CPU_NONE;
3410}
3411EXPORT_SYMBOL_GPL(work_cpu);
3412
3413/**
3414 * work_busy - test whether a work is currently pending or running
3415 * @work: the work to be tested
3416 *
3417 * Test whether @work is currently pending or running.  There is no
3418 * synchronization around this function and the test result is
3419 * unreliable and only useful as advisory hints or for debugging.
3420 * Especially for reentrant wqs, the pending state might hide the
3421 * running state.
3422 *
3423 * RETURNS:
3424 * OR'd bitmask of WORK_BUSY_* bits.
3425 */
3426unsigned int work_busy(struct work_struct *work)
3427{
3428        struct global_cwq *gcwq = get_work_gcwq(work);
3429        unsigned long flags;
3430        unsigned int ret = 0;
3431
3432        if (!gcwq)
3433                return 0;
3434
3435        spin_lock_irqsave(&gcwq->lock, flags);
3436
3437        if (work_pending(work))
3438                ret |= WORK_BUSY_PENDING;
3439        if (find_worker_executing_work(gcwq, work))
3440                ret |= WORK_BUSY_RUNNING;
3441
3442        spin_unlock_irqrestore(&gcwq->lock, flags);
3443
3444        return ret;
3445}
3446EXPORT_SYMBOL_GPL(work_busy);
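
/*
 * Example (illustrative sketch, hypothetical example_* names): using
 * work_busy() as an advisory hint, e.g. in a debugfs dump.  The result
 * may be stale by the time it is printed, so it must not be used for
 * correctness decisions.
 */
static void __maybe_unused example_dump_work_state(struct work_struct *work)
{
        unsigned int busy = work_busy(work);

        pr_info("work %p:%s%s\n", work,
                busy & WORK_BUSY_PENDING ? " PENDING" : "",
                busy & WORK_BUSY_RUNNING ? " RUNNING" : "");
}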
3447
3448/*
3449 * CPU hotplug.
3450 *
3451 * There are two challenges in supporting CPU hotplug.  Firstly, there
3452 * are a lot of assumptions on strong associations among work, cwq and
3453 * gcwq which make migrating pending and scheduled works very
3454 * difficult to implement without impacting hot paths.  Secondly,
3455 * gcwqs serve a mix of short, long and very long running works, making
3456 * blocked draining impractical.
3457 *
3458 * This is solved by allowing a gcwq to be disassociated from its CPU
3459 * and run as an unbound one, and by allowing it to be reattached later
3460 * if the CPU comes back online.
3461 */
3462
3463/* claim manager positions of all pools */
3464static void gcwq_claim_management_and_lock(struct global_cwq *gcwq)
3465{
3466        struct worker_pool *pool;
3467
3468        for_each_worker_pool(pool, gcwq)
3469                mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools);
3470        spin_lock_irq(&gcwq->lock);
3471}
3472
3473/* release manager positions */
3474static void gcwq_release_management_and_unlock(struct global_cwq *gcwq)
3475{
3476        struct worker_pool *pool;
3477
3478        spin_unlock_irq(&gcwq->lock);
3479        for_each_worker_pool(pool, gcwq)
3480                mutex_unlock(&pool->manager_mutex);
3481}
3482
3483static void gcwq_unbind_fn(struct work_struct *work)
3484{
3485        struct global_cwq *gcwq = get_gcwq(smp_processor_id());
3486        struct worker_pool *pool;
3487        struct worker *worker;
3488        struct hlist_node *pos;
3489        int i;
3490
3491        BUG_ON(gcwq->cpu != smp_processor_id());
3492
3493        gcwq_claim_management_and_lock(gcwq);
3494
3495        /*
3496         * We've claimed all manager positions.  Make all workers unbound
3497         * and set DISASSOCIATED.  Before this, all workers except for the
3498         * ones which are still executing works from before the last CPU
3499         * down must be on the cpu.  After this, they may become diasporas.
3500         */
3501        for_each_worker_pool(pool, gcwq)
3502                list_for_each_entry(worker, &pool->idle_list, entry)
3503                        worker->flags |= WORKER_UNBOUND;
3504
3505        for_each_busy_worker(worker, i, pos, gcwq)
3506                worker->flags |= WORKER_UNBOUND;
3507
3508        gcwq->flags |= GCWQ_DISASSOCIATED;
3509
3510        gcwq_release_management_and_unlock(gcwq);
3511
3512        /*
3513         * Call schedule() so that we cross rq->lock and thus can guarantee
3514         * sched callbacks see the %WORKER_UNBOUND flag.  This is necessary
3515         * as scheduler callbacks may be invoked from other cpus.
3516         */
3517        schedule();
3518
3519        /*
3520         * Sched callbacks are disabled now.  Zap nr_running.  After this,
3521         * nr_running stays zero and need_more_worker() and keep_working()
3522         * are always true as long as the worklist is not empty.  @gcwq now
3523         * behaves as unbound (in terms of concurrency management) gcwq
3524         * which is served by workers tied to the CPU.
3525         *
3526         * On return from this function, the current worker would trigger
3527         * unbound chain execution of pending work items if other workers
3528         * didn't already.
3529         */
3530        for_each_worker_pool(pool, gcwq)
3531                atomic_set(get_pool_nr_running(pool), 0);
3532}
3533
3534/*
3535 * Workqueues should be brought up before normal priority CPU notifiers.
3536 * This will be registered as a high priority CPU notifier.
3537 */
3538static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
3539                                               unsigned long action,
3540                                               void *hcpu)
3541{
3542        unsigned int cpu = (unsigned long)hcpu;
3543        struct global_cwq *gcwq = get_gcwq(cpu);
3544        struct worker_pool *pool;
3545
3546        switch (action & ~CPU_TASKS_FROZEN) {
3547        case CPU_UP_PREPARE:
3548                for_each_worker_pool(pool, gcwq) {
3549                        struct worker *worker;
3550
3551                        if (pool->nr_workers)
3552                                continue;
3553
3554                        worker = create_worker(pool);
3555                        if (!worker)
3556                                return NOTIFY_BAD;
3557
3558                        spin_lock_irq(&gcwq->lock);
3559                        start_worker(worker);
3560                        spin_unlock_irq(&gcwq->lock);
3561                }
3562                break;
3563
3564        case CPU_DOWN_FAILED:
3565        case CPU_ONLINE:
3566                gcwq_claim_management_and_lock(gcwq);
3567                gcwq->flags &= ~GCWQ_DISASSOCIATED;
3568                rebind_workers(gcwq);
3569                gcwq_release_management_and_unlock(gcwq);
3570                break;
3571        }
3572        return NOTIFY_OK;
3573}
3574
3575/*
3576 * Workqueues should be brought down after normal priority CPU notifiers.
3577 * This will be registered as a low priority CPU notifier.
3578 */
3579static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
3580                                                 unsigned long action,
3581                                                 void *hcpu)
3582{
3583        unsigned int cpu = (unsigned long)hcpu;
3584        struct work_struct unbind_work;
3585
3586        switch (action & ~CPU_TASKS_FROZEN) {
3587        case CPU_DOWN_PREPARE:
3588                /* unbinding should happen on the local CPU */
3589                INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
3590                schedule_work_on(cpu, &unbind_work);
3591                flush_work(&unbind_work);
3592                break;
3593        }
3594        return NOTIFY_OK;
3595}
3596
3597#ifdef CONFIG_SMP
3598
3599struct work_for_cpu {
3600        struct work_struct work;
3601        long (*fn)(void *);
3602        void *arg;
3603        long ret;
3604};
3605
3606static void work_for_cpu_fn(struct work_struct *work)
3607{
3608        struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
3609
3610        wfc->ret = wfc->fn(wfc->arg);
3611}
3612
3613/**
3614 * work_on_cpu - run a function in user context on a particular cpu
3615 * @cpu: the cpu to run on
3616 * @fn: the function to run
3617 * @arg: the function arg
3618 *
3619 * This will return the value @fn returns.
3620 * It is up to the caller to ensure that the cpu doesn't go offline.
3621 * The caller must not hold any locks which would prevent @fn from completing.
3622 */
3623long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
3624{
3625        struct work_for_cpu wfc = { .fn = fn, .arg = arg };
3626
3627        INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
3628        schedule_work_on(cpu, &wfc.work);
3629        flush_work(&wfc.work);
3630        return wfc.ret;
3631}
3632EXPORT_SYMBOL_GPL(work_on_cpu);
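
/*
 * Example (illustrative sketch, hypothetical example_* names): running a
 * query on a specific CPU, e.g. to read state that is only accessible
 * from the CPU that owns it.  The caller is responsible for keeping that
 * CPU online, e.g. with get_online_cpus()/put_online_cpus().
 */
static long example_query_fn(void *arg)
{
        /* executes in process context on the requested CPU */
        return (long)smp_processor_id();
}

static long __maybe_unused example_query_cpu(unsigned int cpu)
{
        return work_on_cpu(cpu, example_query_fn, NULL);
}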
3633#endif /* CONFIG_SMP */
3634
3635#ifdef CONFIG_FREEZER
3636
3637/**
3638 * freeze_workqueues_begin - begin freezing workqueues
3639 *
3640 * Start freezing workqueues.  After this function returns, all freezable
3641 * workqueues will queue new works to their cwq->delayed_works list
3642 * instead of gcwq->worklist.
3643 *
3644 * CONTEXT:
3645 * Grabs and releases workqueue_lock and gcwq->lock's.
3646 */
3647void freeze_workqueues_begin(void)
3648{
3649        unsigned int cpu;
3650
3651        spin_lock(&workqueue_lock);
3652
3653        BUG_ON(workqueue_freezing);
3654        workqueue_freezing = true;
3655
3656        for_each_gcwq_cpu(cpu) {
3657                struct global_cwq *gcwq = get_gcwq(cpu);
3658                struct workqueue_struct *wq;
3659
3660                spin_lock_irq(&gcwq->lock);
3661
3662                BUG_ON(gcwq->flags & GCWQ_FREEZING);
3663                gcwq->flags |= GCWQ_FREEZING;
3664
3665                list_for_each_entry(wq, &workqueues, list) {
3666                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3667
3668                        if (cwq && wq->flags & WQ_FREEZABLE)
3669                                cwq->max_active = 0;
3670                }
3671
3672                spin_unlock_irq(&gcwq->lock);
3673        }
3674
3675        spin_unlock(&workqueue_lock);
3676}
3677
3678/**
3679 * freeze_workqueues_busy - are freezable workqueues still busy?
3680 *
3681 * Check whether freezing is complete.  This function must be called
3682 * between freeze_workqueues_begin() and thaw_workqueues().
3683 *
3684 * CONTEXT:
3685 * Grabs and releases workqueue_lock.
3686 *
3687 * RETURNS:
3688 * %true if some freezable workqueues are still busy.  %false if freezing
3689 * is complete.
3690 */
3691bool freeze_workqueues_busy(void)
3692{
3693        unsigned int cpu;
3694        bool busy = false;
3695
3696        spin_lock(&workqueue_lock);
3697
3698        BUG_ON(!workqueue_freezing);
3699
3700        for_each_gcwq_cpu(cpu) {
3701                struct workqueue_struct *wq;
3702                /*
3703                 * nr_active is monotonically decreasing.  It's safe
3704                 * to peek without lock.
3705                 */
3706                list_for_each_entry(wq, &workqueues, list) {
3707                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3708
3709                        if (!cwq || !(wq->flags & WQ_FREEZABLE))
3710                                continue;
3711
3712                        BUG_ON(cwq->nr_active < 0);
3713                        if (cwq->nr_active) {
3714                                busy = true;
3715                                goto out_unlock;
3716                        }
3717                }
3718        }
3719out_unlock:
3720        spin_unlock(&workqueue_lock);
3721        return busy;
3722}
3723
3724/**
3725 * thaw_workqueues - thaw workqueues
3726 *
3727 * Thaw workqueues.  Normal queueing is restored and all collected
3728 * frozen works are transferred to their respective gcwq worklists.
3729 *
3730 * CONTEXT:
3731 * Grabs and releases workqueue_lock and gcwq->lock's.
3732 */
3733void thaw_workqueues(void)
3734{
3735        unsigned int cpu;
3736
3737        spin_lock(&workqueue_lock);
3738
3739        if (!workqueue_freezing)
3740                goto out_unlock;
3741
3742        for_each_gcwq_cpu(cpu) {
3743                struct global_cwq *gcwq = get_gcwq(cpu);
3744                struct worker_pool *pool;
3745                struct workqueue_struct *wq;
3746
3747                spin_lock_irq(&gcwq->lock);
3748
3749                BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3750                gcwq->flags &= ~GCWQ_FREEZING;
3751
3752                list_for_each_entry(wq, &workqueues, list) {
3753                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3754
3755                        if (!cwq || !(wq->flags & WQ_FREEZABLE))
3756                                continue;
3757
3758                        /* restore max_active and repopulate worklist */
3759                        cwq->max_active = wq->saved_max_active;
3760
3761                        while (!list_empty(&cwq->delayed_works) &&
3762                               cwq->nr_active < cwq->max_active)
3763                                cwq_activate_first_delayed(cwq);
3764                }
3765
3766                for_each_worker_pool(pool, gcwq)
3767                        wake_up_worker(pool);
3768
3769                spin_unlock_irq(&gcwq->lock);
3770        }
3771
3772        workqueue_freezing = false;
3773out_unlock:
3774        spin_unlock(&workqueue_lock);
3775}
3776#endif /* CONFIG_FREEZER */
3777
3778static int __init init_workqueues(void)
3779{
3780        unsigned int cpu;
3781        int i;
3782
3783        cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
3784        cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
3785
3786        /* initialize gcwqs */
3787        for_each_gcwq_cpu(cpu) {
3788                struct global_cwq *gcwq = get_gcwq(cpu);
3789                struct worker_pool *pool;
3790
3791                spin_lock_init(&gcwq->lock);
3792                gcwq->cpu = cpu;
3793                gcwq->flags |= GCWQ_DISASSOCIATED;
3794
3795                for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3796                        INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3797
3798                for_each_worker_pool(pool, gcwq) {
3799                        pool->gcwq = gcwq;
3800                        INIT_LIST_HEAD(&pool->worklist);
3801                        INIT_LIST_HEAD(&pool->idle_list);
3802
3803                        init_timer_deferrable(&pool->idle_timer);
3804                        pool->idle_timer.function = idle_worker_timeout;
3805                        pool->idle_timer.data = (unsigned long)pool;
3806
3807                        setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
3808                                    (unsigned long)pool);
3809
3810                        mutex_init(&pool->manager_mutex);
3811                        ida_init(&pool->worker_ida);
3812                }
3813
3814                init_waitqueue_head(&gcwq->rebind_hold);
3815        }
3816
3817        /* create the initial worker */
3818        for_each_online_gcwq_cpu(cpu) {
3819                struct global_cwq *gcwq = get_gcwq(cpu);
3820                struct worker_pool *pool;
3821
3822                if (cpu != WORK_CPU_UNBOUND)
3823                        gcwq->flags &= ~GCWQ_DISASSOCIATED;
3824
3825                for_each_worker_pool(pool, gcwq) {
3826                        struct worker *worker;
3827
3828                        worker = create_worker(pool);
3829                        BUG_ON(!worker);
3830                        spin_lock_irq(&gcwq->lock);
3831                        start_worker(worker);
3832                        spin_unlock_irq(&gcwq->lock);
3833                }
3834        }
3835
3836        system_wq = alloc_workqueue("events", 0, 0);
3837        system_long_wq = alloc_workqueue("events_long", 0, 0);
3838        system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3839        system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3840                                            WQ_UNBOUND_MAX_ACTIVE);
3841        system_freezable_wq = alloc_workqueue("events_freezable",
3842                                              WQ_FREEZABLE, 0);
3843        system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
3844                        WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
3845        BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
3846               !system_unbound_wq || !system_freezable_wq ||
3847               !system_nrt_freezable_wq);
3848        return 0;
3849}
3850early_initcall(init_workqueues);
3851