linux/include/linux/workqueue.h
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(unsigned long __data);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
        WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
        WORK_STRUCT_DELAYED_BIT = 1,    /* work item is delayed */
        WORK_STRUCT_CWQ_BIT     = 2,    /* data points to cwq */
        WORK_STRUCT_LINKED_BIT  = 3,    /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
        WORK_STRUCT_STATIC_BIT  = 4,    /* static initializer (debugobjects) */
        WORK_STRUCT_COLOR_SHIFT = 5,    /* color for workqueue flushing */
#else
        WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
#endif

        WORK_STRUCT_COLOR_BITS  = 4,

        WORK_STRUCT_PENDING     = 1 << WORK_STRUCT_PENDING_BIT,
        WORK_STRUCT_DELAYED     = 1 << WORK_STRUCT_DELAYED_BIT,
        WORK_STRUCT_CWQ         = 1 << WORK_STRUCT_CWQ_BIT,
        WORK_STRUCT_LINKED      = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
        WORK_STRUCT_STATIC      = 1 << WORK_STRUCT_STATIC_BIT,
#else
        WORK_STRUCT_STATIC      = 0,
#endif

        /*
         * The last color is reserved as "no color", used for work
         * items which don't participate in workqueue flushing.
         */
        WORK_NR_COLORS          = (1 << WORK_STRUCT_COLOR_BITS) - 1,
        WORK_NO_COLOR           = WORK_NR_COLORS,

        /* special cpu IDs */
        WORK_CPU_UNBOUND        = NR_CPUS,
        WORK_CPU_NONE           = NR_CPUS + 1,
        WORK_CPU_LAST           = WORK_CPU_NONE,

        /*
         * Reserve 8 bits off of the cwq pointer w/ debugobjects turned
         * off.  This makes cwqs aligned to 256 bytes and allows 15
         * workqueue flush colors.
         */
        WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                                  WORK_STRUCT_COLOR_BITS,

        /* data contains off-queue information when !WORK_STRUCT_CWQ */
        WORK_OFFQ_FLAG_BASE     = WORK_STRUCT_FLAG_BITS,

        WORK_OFFQ_CANCELING     = (1 << WORK_OFFQ_FLAG_BASE),

        WORK_OFFQ_FLAG_BITS     = 1,
        WORK_OFFQ_CPU_SHIFT     = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,

        /* convenience constants */
        WORK_STRUCT_FLAG_MASK   = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
        WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
        WORK_STRUCT_NO_CPU      = (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT,

        /* bit mask for work_busy() return values */
        WORK_BUSY_PENDING       = 1 << 0,
        WORK_BUSY_RUNNING       = 1 << 1,
};

struct work_struct {
        atomic_long_t data;
        struct list_head entry;
        work_func_t func;
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()        ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
#define WORK_DATA_STATIC_INIT() \
        ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

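/*
 * Example (illustrative sketch, not part of this header): decoding the
 * atomic @data word laid out by the enum above.  The low
 * WORK_STRUCT_FLAG_BITS bits carry the WORK_STRUCT_* flags; the
 * remaining bits hold the cwq pointer while WORK_STRUCT_CWQ is set.
 * The my_* helpers are hypothetical.
 */
#if 0
static bool my_work_is_pending(struct work_struct *work)
{
        /* the same test that work_pending() below performs */
        return test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work));
}

static void *my_work_cwq(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);

        /* the pointer part is only meaningful while on a queue */
        if (!(data & WORK_STRUCT_CWQ))
                return NULL;
        return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
}
#endif
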
struct delayed_work {
        struct work_struct work;
        struct timer_list timer;
        int cpu;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
        return container_of(work, struct delayed_work, work);
}

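/*
 * Example (illustrative sketch): a delayed work function re-arming
 * itself.  The workqueue core calls the function with a pointer to the
 * embedded work_struct; to_delayed_work() recovers the containing
 * delayed_work so it can be queued again.  my_poll_fn is hypothetical.
 */
#if 0
static void my_poll_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);

        /* ... do the periodic work ... */

        /* re-arm: run again in roughly one second */
        schedule_delayed_work(dwork, HZ);
}
#endif
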
struct execute_work {
        struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
        .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {                                      \
        .data = WORK_DATA_STATIC_INIT(),                                \
        .entry  = { &(n).entry, &(n).entry },                           \
        .func = (f),                                                    \
        __WORK_INIT_LOCKDEP_MAP(#n, &(n))                               \
        }

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {                      \
        .work = __WORK_INITIALIZER((n).work, (f)),                      \
        .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,             \
                                     0, (unsigned long)&(n),            \
                                     (tflags) | TIMER_IRQSAFE),         \
        }

#define DECLARE_WORK(n, f)                                              \
        struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)                                      \
        struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)                                   \
        struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

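/*
 * Example (illustrative sketch): compile-time declaration of work
 * items with the DECLARE_* macros above.  All my_* names are
 * hypothetical.
 */
#if 0
static void my_work_fn(struct work_struct *work);

static DECLARE_WORK(my_work, my_work_fn);
static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

static void my_kick(void)
{
        schedule_work(&my_work);                /* run ASAP */
        schedule_delayed_work(&my_dwork, HZ);   /* run in ~1s */
}
#endif
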
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)                                      \
        do {                                                            \
                (_work)->func = (_func);                                \
        } while (0)

#define PREPARE_DELAYED_WORK(_work, _func)                              \
        PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
        return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)                             \
        do {                                                            \
                static struct lock_class_key __key;                     \
                                                                        \
                __init_work((_work), _onstack);                         \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
                lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
                INIT_LIST_HEAD(&(_work)->entry);                        \
                PREPARE_WORK((_work), (_func));                         \
        } while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)                             \
        do {                                                            \
                __init_work((_work), _onstack);                         \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
                INIT_LIST_HEAD(&(_work)->entry);                        \
                PREPARE_WORK((_work), (_func));                         \
        } while (0)
#endif

#define INIT_WORK(_work, _func)                                         \
        do {                                                            \
                __INIT_WORK((_work), (_func), 0);                       \
        } while (0)

#define INIT_WORK_ONSTACK(_work, _func)                                 \
        do {                                                            \
                __INIT_WORK((_work), (_func), 1);                       \
        } while (0)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)                      \
        do {                                                            \
                INIT_WORK(&(_work)->work, (_func));                     \
                __setup_timer(&(_work)->timer, delayed_work_timer_fn,   \
                              (unsigned long)(_work),                   \
                              (_tflags) | TIMER_IRQSAFE);               \
        } while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)              \
        do {                                                            \
                INIT_WORK_ONSTACK(&(_work)->work, (_func));             \
                __setup_timer_on_stack(&(_work)->timer,                 \
                                       delayed_work_timer_fn,           \
                                       (unsigned long)(_work),          \
                                       (_tflags) | TIMER_IRQSAFE);      \
        } while (0)

#define INIT_DELAYED_WORK(_work, _func)                                 \
        __INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)                         \
        __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)                              \
        __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)                      \
        __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

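/*
 * Example (illustrative sketch): runtime initialization of work items
 * embedded in a dynamically allocated object, as typically done on a
 * driver's probe path.  struct my_dev and the my_*_fn handlers are
 * hypothetical.
 */
#if 0
static void my_reset_fn(struct work_struct *work);
static void my_poll_fn(struct work_struct *work);

struct my_dev {
        struct work_struct reset_work;
        struct delayed_work poll_work;
};

static void my_dev_setup(struct my_dev *dev)
{
        INIT_WORK(&dev->reset_work, my_reset_fn);
        INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
}
#endif
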
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
        test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
        work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
        clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
        WQ_NON_REENTRANT        = 1 << 0, /* guarantee non-reentrance */
        WQ_UNBOUND              = 1 << 1, /* not bound to any cpu */
        WQ_FREEZABLE            = 1 << 2, /* freeze during suspend */
        WQ_MEM_RECLAIM          = 1 << 3, /* may be used for memory reclaim */
        WQ_HIGHPRI              = 1 << 4, /* high priority */
        WQ_CPU_INTENSIVE        = 1 << 5, /* cpu intensive workqueue */

        WQ_DRAINING             = 1 << 6, /* internal: workqueue is draining */
        WQ_RESCUER              = 1 << 7, /* internal: workqueue has rescuer */

        WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
        WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
        WQ_DFL_ACTIVE           = WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE   \
        max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;

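/*
 * Example (illustrative sketch): picking a system workqueue per the
 * comment above.  Short items can go through schedule_work(), i.e.
 * system_wq; work that may run long belongs on system_long_wq.  The
 * my_* names are hypothetical.
 */
#if 0
static void my_quick_fn(struct work_struct *work);      /* short-lived */
static void my_slow_fn(struct work_struct *work);       /* may run long */

static DECLARE_WORK(my_quick_work, my_quick_fn);
static DECLARE_WORK(my_slow_work, my_slow_fn);

static void my_submit(void)
{
        schedule_work(&my_quick_work);                  /* system_wq */
        queue_work(system_long_wq, &my_slow_work);
}
#endif
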
static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
{
        return system_wq;
}

static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
{
        return system_freezable_wq;
}

/* equivalent to system_wq and system_freezable_wq, deprecated */
#define system_nrt_wq                   __system_nrt_wq()
#define system_nrt_freezable_wq         __system_nrt_freezable_wq()

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
        struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
 *
 * The __lock_name macro dance is to guarantee that a single
 * lock_class_key doesn't end up with different names, which isn't
 * allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)                \
({                                                                      \
        static struct lock_class_key __key;                             \
        const char *__lock_name;                                        \
                                                                        \
        if (__builtin_constant_p(fmt))                                  \
                __lock_name = (fmt);                                    \
        else                                                            \
                __lock_name = #fmt;                                     \
                                                                        \
        __alloc_workqueue_key((fmt), (flags), (max_active),             \
                              &__key, __lock_name, ##args);             \
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)                \
        __alloc_workqueue_key((fmt), (flags), (max_active),             \
                              NULL, NULL, ##args)
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  It is
 * implemented as an unbound workqueue with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)                    \
        alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)

#define create_workqueue(name)                                          \
        alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezable_workqueue(name)                                \
        alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)                             \
        alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)

extern void destroy_workqueue(struct workqueue_struct *wq);

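/*
 * Example (illustrative sketch): full lifecycle of a private
 * workqueue.  The names my_wq/my_work are hypothetical and error
 * handling is minimal.
 */
#if 0
static void my_work_fn(struct work_struct *work);

static struct workqueue_struct *my_wq;
static DECLARE_WORK(my_work, my_work_fn);

static int my_init(void)
{
        /* max_active of 0 selects the default (WQ_DFL_ACTIVE) */
        my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
        if (!my_wq)
                return -ENOMEM;
        queue_work(my_wq, &my_work);
        return 0;
}

static void my_exit(void)
{
        /* completes all pending work items, then frees the workqueue */
        destroy_workqueue(my_wq);
}
#endif
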
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
                        struct work_struct *work);
extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);
extern bool queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay);
extern bool mod_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay);

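/*
 * Example (illustrative sketch): debouncing with mod_delayed_work().
 * Each call pushes the expiry back, so the work runs once, @delay
 * after the last event.  The my_* names are hypothetical.
 */
#if 0
static void my_flush_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_flush_work, my_flush_fn);

static void my_on_event(void)
{
        /* (re)arms: fires ~100ms after the most recent event */
        mod_delayed_work(system_wq, &my_flush_work, msecs_to_jiffies(100));
}
#endif
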
extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern bool schedule_work_on(int cpu, struct work_struct *work);
extern bool schedule_work(struct work_struct *work);
extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
                                     unsigned long delay);
extern bool schedule_delayed_work(struct delayed_work *work,
                                  unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

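/*
 * Example (illustrative sketch): tearing down the work items from the
 * hypothetical struct my_dev shown earlier.  The _sync variants wait
 * for any running instance to finish, so the object can be freed
 * safely afterwards.
 */
#if 0
static void my_dev_teardown(struct my_dev *dev)
{
        cancel_work_sync(&dev->reset_work);
        cancel_delayed_work_sync(&dev->poll_work);
        /* nothing can still be executing; dev may be freed now */
}
#endif
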
extern void workqueue_set_max_active(struct workqueue_struct *wq,
                                     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);

/*
 * Like above, but uses del_timer() instead of del_timer_sync().  This
 * means that if it returns 0, the timer function may still be running
 * and the queueing may still be in progress.
 */
static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
{
        bool ret;

        ret = del_timer(&work->timer);
        if (ret)
                work_clear_pending(&work->work);
        return ret;
}

/* used to be different but now identical to flush_work(), deprecated */
static inline bool __deprecated flush_work_sync(struct work_struct *work)
{
        return flush_work(work);
}

/* used to be different but now identical to flush_delayed_work(), deprecated */
static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
{
        return flush_delayed_work(dwork);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
        return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

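/*
 * Example (illustrative sketch): running a function synchronously on a
 * specific CPU via work_on_cpu().  The my_* names are hypothetical.
 */
#if 0
static long my_probe_fn(void *arg)
{
        /* executes on the CPU that was passed to work_on_cpu() */
        return smp_processor_id();
}

static long my_probe_cpu(unsigned int cpu)
{
        /* blocks until my_probe_fn() has run and returns its result */
        return work_on_cpu(cpu, my_probe_fn, NULL);
}
#endif
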
#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#endif