linux/kernel/async.c
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time, by
running the various independent hardware delay and discovery operations
decoupled from each other rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in order
(not unlike how out-of-order CPUs retire their instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

The asynchronously called function should, before doing a globally visible
operation such as registering device numbers, call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding to the
cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
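
/*
 * A minimal sketch of the cookie protocol described above; my_dev,
 * my_probe_async(), my_dev_reset() and my_register_dev() are
 * hypothetical names, not part of this file:
 *
 *	static void my_probe_async(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_dev_reset(dev);
 *		async_synchronize_cookie(cookie);
 *		my_register_dev(dev);
 *	}
 *
 *	async_schedule(my_probe_async, &my_dev);
 *
 * The slow my_dev_reset() overlaps with other asynchronous work, while
 * the externally visible my_register_dev() is held back until all calls
 * scheduled before this one have completed, preserving cookie order.
 */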

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static async_cookie_t next_cookie = 1;

#define MAX_WORK        32768

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_running);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);

struct async_entry {
        struct list_head        list;
        struct work_struct      work;
        async_cookie_t          cookie;
        async_func_ptr          *func;
        void                    *data;
        struct async_domain     *running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;


/*
 * MUST be called with the lock held!
 */
static async_cookie_t  __lowest_in_progress(struct async_domain *running)
{
        async_cookie_t first_running = next_cookie;     /* infinity value */
        async_cookie_t first_pending = next_cookie;     /* ditto */
        struct async_entry *entry;

        /*
         * Both running and pending lists are sorted but not disjoint.
         * Take the first cookies from both and return the min.
         */
        if (!list_empty(&running->domain)) {
                entry = list_first_entry(&running->domain, typeof(*entry), list);
                first_running = entry->cookie;
        }

        list_for_each_entry(entry, &async_pending, list) {
                if (entry->running == running) {
                        first_pending = entry->cookie;
                        break;
                }
        }

        return min(first_running, first_pending);
}
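
/*
 * Worked example (values assumed for illustration): with next_cookie
 * at 20, a running list holding cookies {5, 9} for this domain and
 * async_pending holding {7, 12} for it, the result is min(5, 7) == 5.
 * If neither list has an entry for the domain, next_cookie (20) is
 * returned, which reads as "nothing in progress" to every waiter,
 * since all cookies handed out so far are lower.
 */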

static async_cookie_t  lowest_in_progress(struct async_domain *running)
{
        unsigned long flags;
        async_cookie_t ret;

        spin_lock_irqsave(&async_lock, flags);
        ret = __lowest_in_progress(running);
        spin_unlock_irqrestore(&async_lock, flags);
        return ret;
}

/*
 * workqueue callback: run one async entry, keeping the running
 * list sorted by cookie while the entry executes
 */
static void async_run_entry_fn(struct work_struct *work)
{
        struct async_entry *entry =
                container_of(work, struct async_entry, work);
        struct async_entry *pos;
        unsigned long flags;
        ktime_t uninitialized_var(calltime), delta, rettime;
        struct async_domain *running = entry->running;

        /* 1) move self to the running queue, make sure it stays sorted */
        spin_lock_irqsave(&async_lock, flags);
        list_for_each_entry_reverse(pos, &running->domain, list)
                if (entry->cookie < pos->cookie)
                        break;
        list_move_tail(&entry->list, &pos->list);
        spin_unlock_irqrestore(&async_lock, flags);

        /* 2) run (and print duration) */
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk(KERN_DEBUG "calling  %lli_%pF @ %i\n",
                        (long long)entry->cookie,
                        entry->func, task_pid_nr(current));
                calltime = ktime_get();
        }
        entry->func(entry->data, entry->cookie);
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
                        (long long)entry->cookie,
                        entry->func,
                        (long long)ktime_to_ns(delta) >> 10);
        }

        /* 3) remove self from the running queue */
        spin_lock_irqsave(&async_lock, flags);
        list_del(&entry->list);
        if (running->registered && --running->count == 0)
                list_del_init(&running->node);

        /* 4) free the entry */
        kfree(entry);
        atomic_dec(&entry_count);

        spin_unlock_irqrestore(&async_lock, flags);

        /* 5) wake up any waiters */
        wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
{
        struct async_entry *entry;
        unsigned long flags;
        async_cookie_t newcookie;

        /* allow irq-off callers */
        entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

        /*
         * If we're out of memory or if there's too much work
         * pending already, we execute synchronously.
         */
        if (!entry || atomic_read(&entry_count) > MAX_WORK) {
                kfree(entry);
                spin_lock_irqsave(&async_lock, flags);
                newcookie = next_cookie++;
                spin_unlock_irqrestore(&async_lock, flags);

                /* low on memory.. run synchronously */
                ptr(data, newcookie);
                return newcookie;
        }
        INIT_WORK(&entry->work, async_run_entry_fn);
        entry->func = ptr;
        entry->data = data;
        entry->running = running;

        spin_lock_irqsave(&async_lock, flags);
        newcookie = entry->cookie = next_cookie++;
        list_add_tail(&entry->list, &async_pending);
        if (running->registered && running->count++ == 0)
                list_add_tail(&running->node, &async_domains);
        atomic_inc(&entry_count);
        spin_unlock_irqrestore(&async_lock, flags);

        /* schedule for execution */
        queue_work(system_unbound_wq, &entry->work);

        return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
        return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);
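
/*
 * Caller-side sketch (hypothetical names): the returned cookie lets the
 * scheduling thread, rather than the called function, act as the
 * checkpoint. Given the wait condition in
 * async_synchronize_cookie_domain() below, synchronization covers only
 * strictly earlier cookies, so waiting for the scheduled call itself
 * requires cookie + 1:
 *
 *	async_cookie_t cookie = async_schedule(my_probe_async, &my_dev);
 *	...
 *	async_synchronize_cookie(cookie + 1);
 */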

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * The synchronization domain is specified via the running queue @running;
 * it may later be passed to the async_synchronize_*_domain() functions to
 * wait within that domain rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
                                     struct async_domain *running)
{
        return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
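
/*
 * Domain usage sketch (hypothetical names): a private domain confines
 * waiting to one subsystem's asynchronous work instead of all of it:
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	async_schedule_domain(my_probe_async, &my_dev, &my_domain);
 *	async_synchronize_full_domain(&my_domain);
 */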

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
        mutex_lock(&async_register_mutex);
        do {
                struct async_domain *domain = NULL;

                spin_lock_irq(&async_lock);
                if (!list_empty(&async_domains))
                        domain = list_first_entry(&async_domains, typeof(*domain), node);
                spin_unlock_irq(&async_lock);

                async_synchronize_cookie_domain(next_cookie, domain);
        } while (!list_empty(&async_domains));
        mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
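
/*
 * Call-site sketch (hypothetical names): init code that scheduled async
 * probes but shares global resources with non-async drivers flushes all
 * outstanding work before returning, as the theory-of-operation comment
 * at the top of this file requires:
 *
 *	static int __init my_subsys_init(void)
 *	{
 *		async_schedule(my_probe_async, &my_dev);
 *		async_synchronize_full();
 *		return 0;
 *	}
 */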

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain.
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing.
 */
void async_unregister_domain(struct async_domain *domain)
{
        mutex_lock(&async_register_mutex);
        spin_lock_irq(&async_lock);
        WARN_ON(!domain->registered || !list_empty(&domain->node) ||
                !list_empty(&domain->domain));
        domain->registered = 0;
        spin_unlock_irq(&async_lock);
        mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
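
/*
 * Lifetime sketch (hypothetical names): a domain with a bounded lifetime
 * is drained and then unregistered, so async_synchronize_full() can no
 * longer walk it:
 *
 *	async_synchronize_full_domain(&my_domain);
 *	async_unregister_domain(&my_domain);
 *
 * As the kernel-doc above suggests, declaring the domain with
 * ASYNC_DOMAIN_EXCLUSIVE() avoids this, since such a domain is never
 * placed on the list that async_synchronize_full() flushes.
 */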

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
        async_synchronize_cookie_domain(next_cookie, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @running which were
 * submitted prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
{
        ktime_t uninitialized_var(starttime), delta, endtime;

        if (!running)
                return;

        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
                starttime = ktime_get();
        }

        wait_event(async_done, lowest_in_progress(running) >= cookie);

        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                endtime = ktime_get();
                delta = ktime_sub(endtime, starttime);

                printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
                        task_pid_nr(current),
                        (long long)ktime_to_ns(delta) >> 10);
        }
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
        async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);