linux/kernel/rtmutex.c
/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 *
 *  See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner        bit0
 * NULL         0       lock is free (fast acquire possible)
 * NULL         1       lock is free and has waiters and the top waiter
 *                              is going to take the lock*
 * taskpointer  0       lock is held (fast release possible)
 * taskpointer  1       lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It can also be a transitional state when grabbing the lock
 * while ->wait_lock is held. To prevent any fast path cmpxchg on the
 * lock, we need to set bit 0 before looking at the lock, and the
 * owner may be NULL during this small window, hence this can be a
 * transitional state.
 *
 * (**) There is a small window in which bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
        unsigned long val = (unsigned long)owner;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release if the architecture supports
 * cmpxchg and if there's no debugging state to be set up.
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)        (cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)        (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
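
/*
 * Illustrative sketch (not part of the original file): with the
 * cmpxchg fast path available, an uncontended acquire is a single
 * atomic exchange of lock->owner from NULL to current:
 *
 *	if (rt_mutex_cmpxchg(lock, NULL, current))
 *		return;			fast acquire, no waiters queued
 *	rt_mutex_slowlock(...);		bit 0 set or lock already held
 *
 * Once mark_rt_mutex_waiters() has set RT_MUTEX_HAS_WAITERS, the
 * compare in rt_mutex_cmpxchg() can no longer match a plain task
 * pointer (or NULL), so both the acquire and the release fast path
 * fail and everything funnels through lock->wait_lock.
 */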

/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        return min(task_top_pi_waiter(task)->pi_list_entry.prio,
                   task->normal_prio);
}
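
/*
 * Worked example (illustrative, not part of the original file):
 * kernel priority values are inverted, i.e. a lower number means a
 * higher priority. If the owner is a SCHED_NORMAL task with
 * normal_prio 120 and its top PI waiter is an RT task with prio 20,
 * rt_mutex_getprio() returns min(20, 120) == 20, so the owner runs
 * boosted at the waiter's priority until it releases the lock. With
 * no PI waiters it simply returns normal_prio.
 */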

/*
 * Adjust the priority of a task after its pi_waiters list has been
 * modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
        int prio = rt_mutex_getprio(task);

        if (task->prio != prio)
                rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage count by one - may thus free the task.
 *
 * @task: the task owning the mutex (owner) for which a chain walk is
 *        probably needed
 * @deadlock_detect: do we have to carry out deadlock detection?
 * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
 *             things for a task that has just got its priority adjusted, and
 *             is waiting on a mutex)
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *               its priority to the mutex owner (can be NULL in the case
 *               depicted above or if the top waiter has gone away and we
 *               are actually deboosting the owner)
 * @top_task: the current top waiter
 *
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        int detect_deadlock, ret = 0, depth = 0;
        unsigned long flags;

        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
                                                         deadlock_detect);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to hold
         * a maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
 again:
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, task_pid_nr(top_task));
                }
                put_task_struct(task);

                return deadlock_detect ? -EDEADLK : 0;
        }
 retry:
        /*
         * The task cannot go away, as we did a get_task_struct() before!
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
        if (!waiter)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock.
         */
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;

        /*
         * Drop out when the task has no waiters. Note that top_waiter
         * can be NULL when we are in deboosting mode!
         */
        if (top_waiter && (!task_has_pi_waiters(task) ||
                           top_waiter != task_top_pi_waiter(task)))
                goto out_unlock_pi;

        /*
         * When deadlock detection is off, we check whether further
         * priority adjustment is necessary.
         */
        if (!detect_deadlock && waiter->list_entry.prio == task->prio)
                goto out_unlock_pi;

        lock = waiter->lock;
        if (!raw_spin_trylock(&lock->wait_lock)) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
                goto retry;
        }

        /* Deadlock detection */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
                ret = deadlock_detect ? -EDEADLK : 0;
                goto out_unlock_pi;
        }

        top_waiter = rt_mutex_top_waiter(lock);

        /* Requeue the waiter */
        plist_del(&waiter->list_entry, &lock->wait_list);
        waiter->list_entry.prio = task->prio;
        plist_add(&waiter->list_entry, &lock->wait_list);

        /* Release the task */
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        if (!rt_mutex_owner(lock)) {
                /*
                 * If the requeue above changed the top waiter, then we need
                 * to wake the new top waiter up to try to get the lock.
                 */

                if (top_waiter != rt_mutex_top_waiter(lock))
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
                raw_spin_unlock(&lock->wait_lock);
                goto out_put_task;
        }
        put_task_struct(task);

        /* Grab the next task */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                /* Boost the owner */
                plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);

        } else if (top_waiter == waiter) {
                /* Deboost the owner */
                plist_del(&waiter->pi_list_entry, &task->pi_waiters);
                waiter = rt_mutex_top_waiter(lock);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);
        }

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        top_waiter = rt_mutex_top_waiter(lock);
        raw_spin_unlock(&lock->wait_lock);

        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

 out_unlock_pi:
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
        put_task_struct(task);

        return ret;
}
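
/*
 * Example chain walk (illustrative, not part of the original file):
 * task A (prio 20) blocks on lock L1, which is owned by B; B (prio
 * 40) is itself blocked on L2, owned by C (prio 60). Blocking A
 * boosts B to prio 20 in task_blocks_on_rt_mutex(), which then
 * triggers this chain walk on B: the walk requeues B's waiter on L2
 * at the new priority, steps on to owner C, boosts C to prio 20 as
 * well, and terminates there because C is not blocked on any
 * rt_mutex (C->pi_blocked_on == NULL).
 */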

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list (can be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                struct rt_mutex_waiter *waiter)
{
        /*
         * We have to be careful here if the atomic speedups are
         * enabled: when
         *  - no other waiter is on the lock
         *  - the lock has been released since we did the cmpxchg
         * the lock can be released or taken while we are doing the
         * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
         *
         * The atomic acquire/release aware variant of
         * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
         * the WAITERS bit, the atomic release / acquire can not
         * happen anymore and lock->wait_lock protects us from the
         * non-atomic case.
         *
         * Note that this might set lock->owner =
         * RT_MUTEX_HAS_WAITERS in case the lock is no longer
         * contended. This is fixed up when we take ownership.
         * This is the transitional state explained at the top of this file.
         */
        mark_rt_mutex_waiters(lock);

        if (rt_mutex_owner(lock))
                return 0;

        /*
         * The task will get the lock if one of these conditions holds:
         * 1) there is no other waiter
         * 2) it has a higher priority than all waiters
         * 3) it is the top waiter itself
         */
        if (rt_mutex_has_waiters(lock)) {
                if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
                        if (!waiter || waiter != rt_mutex_top_waiter(lock))
                                return 0;
                }
        }

        if (waiter || rt_mutex_has_waiters(lock)) {
                unsigned long flags;
                struct rt_mutex_waiter *top;

                raw_spin_lock_irqsave(&task->pi_lock, flags);

                /* Remove the queued waiter. */
                if (waiter) {
                        plist_del(&waiter->list_entry, &lock->wait_list);
                        task->pi_blocked_on = NULL;
                }

                /*
                 * We have to enqueue the top waiter (if it exists) into
                 * the task->pi_waiters list.
                 */
                if (rt_mutex_has_waiters(lock)) {
                        top = rt_mutex_top_waiter(lock);
                        top->pi_list_entry.prio = top->list_entry.prio;
                        plist_add(&top->pi_list_entry, &task->pi_waiters);
                }
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        }

        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        rt_mutex_set_owner(lock, task);

        rt_mutex_deadlock_account_lock(lock, task);

        return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   struct task_struct *task,
                                   int detect_deadlock)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        unsigned long flags;
        int chain_walk = 0, res;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        waiter->task = task;
        waiter->lock = lock;
        plist_node_init(&waiter->list_entry, task->prio);
        plist_node_init(&waiter->pi_list_entry, task->prio);

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        plist_add(&waiter->list_entry, &lock->wait_list);

        task->pi_blocked_on = waiter;

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        if (!owner)
                return 0;

        if (waiter == rt_mutex_top_waiter(lock)) {
                raw_spin_lock_irqsave(&owner->pi_lock, flags);
                plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
                plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
                raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        }
        else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
                chain_walk = 1;

        if (!chain_walk)
                return 0;

        /*
         * The owner can't disappear while holding a lock,
         * so the owner struct is protected by wait_lock.
         * Gets dropped in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
                                         task);

        raw_spin_lock(&lock->wait_lock);

        return res;
}
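
/*
 * Illustrative note (not part of the original file): the locking
 * order in the blocking path above is
 *
 *	lock->wait_lock		held by the caller throughout,
 *	  task->pi_lock		taken/dropped to queue the waiter,
 *	  owner->pi_lock	taken/dropped to boost the owner,
 *
 * and wait_lock is dropped only around the chain walk, which itself
 * holds at most two locks (a pi_lock and a wait_lock) per step, so
 * the whole walk stays preemptible.
 */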

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        raw_spin_lock_irqsave(&current->pi_lock, flags);

        waiter = rt_mutex_top_waiter(lock);

        /*
         * Remove it from current->pi_waiters. We do not adjust a
         * possible priority boost right now. We execute wakeup in the
         * boosted mode and go back to normal after releasing
         * lock->wait_lock.
         */
        plist_del(&waiter->pi_list_entry, &current->pi_waiters);

        rt_mutex_set_owner(lock, NULL);

        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        wake_up_process(waiter->task);
}
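
/*
 * Illustrative note (not part of the original file): because waiters
 * are still queued, rt_mutex_set_owner(lock, NULL) above leaves
 * lock->owner == RT_MUTEX_HAS_WAITERS, i.e. the "NULL owner, bit 0
 * set" transitional state from the table at the top of this file.
 * That keeps the cmpxchg fast path disabled until the woken top
 * waiter takes the lock in try_to_take_rt_mutex().
 */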

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held by a caller that has just
 * failed try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        unsigned long flags;
        int chain_walk = 0;

        raw_spin_lock_irqsave(&current->pi_lock, flags);
        plist_del(&waiter->list_entry, &lock->wait_list);
        current->pi_blocked_on = NULL;
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        if (!owner)
                return;

        if (first) {

                raw_spin_lock_irqsave(&owner->pi_lock, flags);

                plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

                if (rt_mutex_has_waiters(lock)) {
                        struct rt_mutex_waiter *next;

                        next = rt_mutex_top_waiter(lock);
                        plist_add(&next->pi_list_entry, &owner->pi_waiters);
                }
                __rt_mutex_adjust_prio(owner);

                if (owner->pi_blocked_on)
                        chain_walk = 1;

                raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        }

        WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

        if (!chain_walk)
                return;

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

        raw_spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority change.
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || waiter->list_entry.prio == task->prio) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);
        rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}
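
/*
 * Illustrative scenario (not part of the original file): an RT task
 * blocked on a lock is moved from prio 20 to prio 50 by
 * sched_setscheduler(). Its queued waiter still carries prio 20, so
 * the check above sees waiter->list_entry.prio != task->prio and
 * starts a chain walk on the task itself to requeue the waiter and
 * re-evaluate the boosting of the lock owner.
 */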

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:                the rt_mutex to take
 * @state:               the state the task should block in (TASK_INTERRUPTIBLE
 *                       or TASK_UNINTERRUPTIBLE)
 * @timeout:             the pre-initialized and started timer, or NULL for none
 * @waiter:              the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
                    struct hrtimer_sleeper *timeout,
                    struct rt_mutex_waiter *waiter)
{
        int ret = 0;

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock, current, waiter))
                        break;

                /*
                 * TASK_INTERRUPTIBLE checks for signals and
                 * timeout. Ignored otherwise.
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                raw_spin_unlock(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(waiter);

                schedule_rt_mutex(lock);

                raw_spin_lock(&lock->wait_lock);
                set_current_state(state);
        }

        return ret;
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
        struct rt_mutex_waiter waiter;
        int ret = 0;

        debug_rt_mutex_init_waiter(&waiter);

        raw_spin_lock(&lock->wait_lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 0;
        }

        set_current_state(state);

        /* Set up the timer when timeout != NULL */
        if (unlikely(timeout)) {
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
                if (!hrtimer_active(&timeout->timer))
                        timeout->task = NULL;
        }

        ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

        if (likely(!ret))
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret))
                remove_waiter(lock, &waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        debug_rt_mutex_free_waiter(&waiter);

        return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        int ret = 0;

        raw_spin_lock(&lock->wait_lock);

        if (likely(rt_mutex_owner(lock) != current)) {

                ret = try_to_take_rt_mutex(lock, current, NULL);
                /*
                 * try_to_take_rt_mutex() sets the lock waiters
                 * bit unconditionally. Clean this up.
                 */
                fixup_rt_mutex_waiters(lock);
        }

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
        raw_spin_lock(&lock->wait_lock);

        debug_rt_mutex_unlock(lock);

        rt_mutex_deadlock_account_unlock(current);

        if (!rt_mutex_has_waiters(lock)) {
                lock->owner = NULL;
                raw_spin_unlock(&lock->wait_lock);
                return;
        }

        wakeup_next_waiter(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Undo pi boosting if necessary: */
        rt_mutex_adjust_prio(current);
}

/*
 * debug-aware fast / slowpath lock, trylock and unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        struct hrtimer_sleeper *timeout, int detect_deadlock,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
                                      int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 1;
        }
        return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
                    void (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                rt_mutex_deadlock_account_unlock(current);
        else
                slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        might_sleep();

        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
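
/*
 * Illustrative usage sketch (not part of the original file; the lock
 * name is hypothetical). DEFINE_RT_MUTEX() comes from
 * include/linux/rtmutex.h:
 *
 *	static DEFINE_RT_MUTEX(example_lock);
 *
 *	static void example_critical_section(void)
 *	{
 *		rt_mutex_lock(&example_lock);
 *		... while a low-priority task is inside this section,
 *		    any higher-priority waiter boosts it via PI ...
 *		rt_mutex_unlock(&example_lock);
 *	}
 */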

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex, interruptible by signals
 *
 * @lock:               the rt_mutex to be locked
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
                                                 int detect_deadlock)
{
        might_sleep();

        return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
                                 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
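
/*
 * Illustrative caller pattern (not part of the original file;
 * example_lock is hypothetical):
 *
 *	ret = rt_mutex_lock_interruptible(&example_lock, 0);
 *	if (ret)
 *		return ret;	-EINTR: interrupted by a signal
 *	...
 *	rt_mutex_unlock(&example_lock);
 */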

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptibly, with a timeout
 *                       structure provided by the caller
 *
 * @lock:               the rt_mutex to be locked
 * @timeout:            timeout structure or NULL (no timeout)
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -ETIMEDOUT   when the timeout expired
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
                    int detect_deadlock)
{
        might_sleep();

        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
                                       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
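
/*
 * Illustrative sketch of preparing the sleeper (not part of the
 * original file; modeled on how the futex code drives this API, with
 * a hypothetical absolute expiry time "abs_time"):
 *
 *	struct hrtimer_sleeper to;
 *
 *	hrtimer_init_on_stack(&to.timer, CLOCK_MONOTONIC,
 *			      HRTIMER_MODE_ABS);
 *	hrtimer_init_sleeper(&to, current);
 *	hrtimer_set_expires(&to.timer, abs_time);
 *
 *	ret = rt_mutex_timed_lock(lock, &to, 0);
 *	destroy_hrtimer_on_stack(&to.timer);
 */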

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:       the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
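
/*
 * Illustrative caller pattern (not part of the original file;
 * example_lock is hypothetical):
 *
 *	if (rt_mutex_trylock(&example_lock)) {
 *		... lock acquired without blocking ...
 *		rt_mutex_unlock(&example_lock);
 *	} else {
 *		... contended: fall back or retry later ...
 *	}
 */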

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
        WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
        lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed.
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
        lock->owner = NULL;
        raw_spin_lock_init(&lock->wait_lock);
        plist_head_init(&lock->wait_list);

        debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
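
/*
 * Illustrative sketch (not part of the original file): locks that
 * cannot use the static DEFINE_RT_MUTEX() initializer are set up at
 * runtime through the rt_mutex_init() wrapper from
 * include/linux/rtmutex.h, e.g. for a lock embedded in a
 * hypothetical driver structure:
 *
 *	struct example_dev {
 *		struct rt_mutex lock;
 *		...
 *	};
 *
 *	rt_mutex_init(&dev->lock);
 */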

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *                              proxy owner
 *
 * @lock:       the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. The caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                struct task_struct *proxy_owner)
{
        __rt_mutex_init(lock, NULL);
        debug_rt_mutex_proxy_lock(lock, proxy_owner);
        rt_mutex_set_owner(lock, proxy_owner);
        rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:       the rt_mutex to be unlocked
 * @proxy_owner: the task that had been the owner
 *
 * No locking. The caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                           struct task_struct *proxy_owner)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL);
        rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:               the rt_mutex to take
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @task:               the task to prepare
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                              struct rt_mutex_waiter *waiter,
                              struct task_struct *task, int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        if (try_to_take_rt_mutex(lock, task, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 1;
        }

        ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

        if (ret && !rt_mutex_owner(lock)) {
                /*
                 * Reset the return value. We might have
                 * returned with -EDEADLK and the owner
                 * released the lock while we were walking the
                 * pi chain. Let the waiter sort it out.
                 */
                ret = 0;
        }

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        raw_spin_unlock(&lock->wait_lock);

        debug_rt_mutex_print_deadlock(waiter);

        return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                return NULL;

        return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:               the rt_mutex we were woken on
 * @to:                 the timeout, NULL if none. The hrtimer should already
 *                      have been started.
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
                               struct hrtimer_sleeper *to,
                               struct rt_mutex_waiter *waiter,
                               int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        set_current_state(TASK_INTERRUPTIBLE);

        ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}
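
/*
 * Illustrative pairing (not part of the original file; a simplified
 * view of the FUTEX_WAIT_REQUEUE_PI flow): the task doing the requeue
 * calls rt_mutex_start_proxy_lock() to enqueue the waiter on the
 * rt_mutex on the sleeping task's behalf; the sleeping task, once
 * woken, calls rt_mutex_finish_proxy_lock() with the same waiter to
 * block until it owns the lock or an error occurs.
 */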