linux/kernel/async.c
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time,
by performing various independent hardware delays and discovery
operations in a decoupled fashion rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before performing a globally visible operation, such as registering
device numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding to the
cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
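
/*
 * Illustrative sketch of the pattern described above (not part of this
 * file; my_dev_probe(), my_dev_hw_init() and my_dev_register() are
 * hypothetical names). The slow, independent hardware work may run out
 * of order; the probe then synchronizes on its own cookie so that the
 * globally visible registration step still happens in scheduling order:
 *
 *      static void my_dev_probe(void *data, async_cookie_t cookie)
 *      {
 *              struct my_dev *dev = data;
 *
 *              my_dev_hw_init(dev);
 *
 *              async_synchronize_cookie(cookie);
 *
 *              my_dev_register(dev);
 *      }
 *
 * scheduled from the synchronous init path with:
 *
 *      async_schedule(my_dev_probe, dev);
 */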

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static async_cookie_t next_cookie = 1;

#define MAX_WORK        32768

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_running);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);

struct async_entry {
        struct list_head        list;
        struct work_struct      work;
        async_cookie_t          cookie;
        async_func_ptr          *func;
        void                    *data;
        struct async_domain     *running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;


/*
 * Return the cookie of the oldest entry still pending or running in
 * @running, or next_cookie ("infinity") if the domain is idle.
 * MUST be called with the lock held!
 */
static async_cookie_t  __lowest_in_progress(struct async_domain *running)
{
        struct async_entry *entry;

        if (!list_empty(&running->domain)) {
                entry = list_first_entry(&running->domain, typeof(*entry), list);
                return entry->cookie;
        }

        list_for_each_entry(entry, &async_pending, list)
                if (entry->running == running)
                        return entry->cookie;

        return next_cookie;     /* "infinity" value */
}

static async_cookie_t  lowest_in_progress(struct async_domain *running)
{
        unsigned long flags;
        async_cookie_t ret;

        spin_lock_irqsave(&async_lock, flags);
        ret = __lowest_in_progress(running);
        spin_unlock_irqrestore(&async_lock, flags);
        return ret;
}

/*
 * Run a single async entry: the workqueue callback behind each
 * scheduled asynchronous function call.
 */
static void async_run_entry_fn(struct work_struct *work)
{
        struct async_entry *entry =
                container_of(work, struct async_entry, work);
        unsigned long flags;
        ktime_t uninitialized_var(calltime), delta, rettime;
        struct async_domain *running = entry->running;

        /* 1) move self to the running queue */
        spin_lock_irqsave(&async_lock, flags);
        list_move_tail(&entry->list, &running->domain);
        spin_unlock_irqrestore(&async_lock, flags);

        /* 2) run (and print duration) */
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk(KERN_DEBUG "calling  %lli_%pF @ %i\n",
                        (long long)entry->cookie,
                        entry->func, task_pid_nr(current));
                calltime = ktime_get();
        }
        entry->func(entry->data, entry->cookie);
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
                        (long long)entry->cookie,
                        entry->func,
                        (long long)ktime_to_ns(delta) >> 10);
        }

        /* 3) remove self from the running queue */
        spin_lock_irqsave(&async_lock, flags);
        list_del(&entry->list);
        if (running->registered && --running->count == 0)
                list_del_init(&running->node);

        /* 4) free the entry */
        kfree(entry);
        atomic_dec(&entry_count);

        spin_unlock_irqrestore(&async_lock, flags);

        /* 5) wake up any waiters */
        wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
{
        struct async_entry *entry;
        unsigned long flags;
        async_cookie_t newcookie;

        /* allow irq-off callers */
        entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

        /*
         * If we're out of memory or if there's too much work
         * pending already, we execute synchronously.
         */
        if (!entry || atomic_read(&entry_count) > MAX_WORK) {
                kfree(entry);
                spin_lock_irqsave(&async_lock, flags);
                newcookie = next_cookie++;
                spin_unlock_irqrestore(&async_lock, flags);

                /* low on memory.. run synchronously */
                ptr(data, newcookie);
                return newcookie;
        }
        INIT_WORK(&entry->work, async_run_entry_fn);
        entry->func = ptr;
        entry->data = data;
        entry->running = running;

        spin_lock_irqsave(&async_lock, flags);
        newcookie = entry->cookie = next_cookie++;
        list_add_tail(&entry->list, &async_pending);
        if (running->registered && running->count++ == 0)
                list_add_tail(&running->node, &async_domains);
        atomic_inc(&entry_count);
        spin_unlock_irqrestore(&async_lock, flags);

        /* schedule for execution */
        queue_work(system_unbound_wq, &entry->work);

        return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
        return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * The running queue @running specifies the synchronization domain; it may
 * be passed to the async_synchronize_*_domain() functions to wait within
 * that domain rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
                                     struct async_domain *running)
{
        return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
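
/*
 * Illustrative sketch (not part of this file; sata_probe() and
 * sata_domain are hypothetical names): a subsystem that only needs to
 * wait for its own asynchronous work declares a private domain and
 * synchronizes on that, without waiting for unrelated async calls:
 *
 *      static ASYNC_DOMAIN(sata_domain);
 *
 *      async_schedule_domain(sata_probe, host, &sata_domain);
 *      ...
 *      async_synchronize_full_domain(&sata_domain);
 */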

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
        mutex_lock(&async_register_mutex);
        do {
                struct async_domain *domain = NULL;

                spin_lock_irq(&async_lock);
                if (!list_empty(&async_domains))
                        domain = list_first_entry(&async_domains, typeof(*domain), node);
                spin_unlock_irq(&async_lock);

                async_synchronize_cookie_domain(next_cookie, domain);
        } while (!list_empty(&async_domains));
        mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
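
/*
 * Illustrative sketch (not part of this file; my_init() and
 * my_dev_probe() are hypothetical names): init code that schedules
 * async probes but shares global resources with synchronous code
 * drains all async work before returning, as described at the top of
 * this file:
 *
 *      static int __init my_init(void)
 *      {
 *              async_schedule(my_dev_probe, dev);
 *              ...
 *              async_synchronize_full();
 *              return 0;
 *      }
 */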

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() waiters are not flushed,
 * since callers of those routines should know the lifetime of @domain.
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing.
 */
void async_unregister_domain(struct async_domain *domain)
{
        mutex_lock(&async_register_mutex);
        spin_lock_irq(&async_lock);
        WARN_ON(!domain->registered || !list_empty(&domain->node) ||
                !list_empty(&domain->domain));
        domain->registered = 0;
        spin_unlock_irq(&async_lock);
        mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
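
/*
 * Illustrative sketch (not part of this file; my_domain is a
 * hypothetical name): a domain whose lifetime is shorter than the
 * system's, e.g. one owned by a module, can be declared with
 * ASYNC_DOMAIN_EXCLUSIVE() so it never appears on the async_domains
 * list, or it can be drained and unregistered before it goes away:
 *
 *      static ASYNC_DOMAIN_EXCLUSIVE(my_domain);
 *
 * or, for a registered domain:
 *
 *      async_synchronize_full_domain(&my_domain);
 *      async_unregister_domain(&my_domain);
 */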

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
        async_synchronize_cookie_domain(next_cookie, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @running submitted
 * prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
{
        ktime_t uninitialized_var(starttime), delta, endtime;

        if (!running)
                return;

        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
                starttime = ktime_get();
        }

        wait_event(async_done, lowest_in_progress(running) >= cookie);

        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                endtime = ktime_get();
                delta = ktime_sub(endtime, starttime);

                printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
                        task_pid_nr(current),
                        (long long)ktime_to_ns(delta) >> 10);
        }
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
        async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);