linux/kernel/locking/rwsem.c
// SPDX-License-Identifier: GPL-2.0
/* kernel/rwsem.c: R/W semaphores, public implementation
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 *
 * Rwsem count bit fields re-definition and rwsem rearchitecture by
 * Waiman Long <longman@redhat.com> and
 * Peter Zijlstra <peterz@infradead.org>.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/export.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <trace/events/lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "lock_events.h"

/*
 * The least significant 2 bits of the owner value have the following
 * meanings when set.
 *  - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
 *  - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
 *
 * When the rwsem is reader-owned and a spinning writer has timed out,
 * the nonspinnable bit will be set to disable optimistic spinning.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it will also put its task_struct
 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
 * On unlock, the owner field will largely be left untouched. So
 * for a free or reader-owned rwsem, the owner value may contain
 * information about the last reader that acquired the rwsem.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 *
 * Fast-path reader optimistic lock stealing is supported when the rwsem
 * was previously owned by a writer and the following conditions are met:
 *  - rwsem is not currently writer owned
 *  - the handoff isn't set.
 */
#define RWSEM_READER_OWNED      (1UL << 0)
#define RWSEM_NONSPINNABLE      (1UL << 1)
#define RWSEM_OWNER_FLAGS_MASK  (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)

#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)   do {                    \
        if (!debug_locks_silent &&                              \
            WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
                #c, atomic_long_read(&(sem)->count),            \
                (unsigned long) sem->magic,                     \
                atomic_long_read(&(sem)->owner), (long)current, \
                list_empty(&(sem)->wait_list) ? "" : "not "))   \
                        debug_locks_off();                      \
        } while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif

/*
 * On 64-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-62 - 55-bit reader count
 * Bit  63   - read fail bit
 *
 * On 32-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-30 - 23-bit reader count
 * Bit  31   - read fail bit
 *
 * It is not likely that the most significant bit (read fail bit) will ever
 * be set. This guard bit is still checked anyway in the down_read() fastpath
 * just in case we need to use up more of the reader bits for other purposes
 * in the future.
 *
 * atomic_long_fetch_add() is used to obtain reader lock, whereas
 * atomic_long_cmpxchg() will be used to obtain writer lock.
 *
 * There are three places where the lock handoff bit may be set or cleared.
 * 1) rwsem_mark_wake() for readers             -- set, clear
 * 2) rwsem_try_write_lock() for writers        -- set, clear
 * 3) rwsem_del_waiter()                        -- clear
 *
 * For all the above cases, wait_lock will be held. A writer must also
 * be the first one in the wait_list to be eligible for setting the handoff
 * bit. So concurrent setting/clearing of handoff bit is not possible.
 */
#define RWSEM_WRITER_LOCKED     (1UL << 0)
#define RWSEM_FLAG_WAITERS      (1UL << 1)
#define RWSEM_FLAG_HANDOFF      (1UL << 2)
#define RWSEM_FLAG_READFAIL     (1UL << (BITS_PER_LONG - 1))

#define RWSEM_READER_SHIFT      8
#define RWSEM_READER_BIAS       (1UL << RWSEM_READER_SHIFT)
#define RWSEM_READER_MASK       (~(RWSEM_READER_BIAS - 1))
#define RWSEM_WRITER_MASK       RWSEM_WRITER_LOCKED
#define RWSEM_LOCK_MASK         (RWSEM_WRITER_MASK|RWSEM_READER_MASK)
#define RWSEM_READ_FAILED_MASK  (RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
                                 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)

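/*
 * Illustrative sketch, not part of the kernel build: decoding a count word
 * by hand using the bit layout documented above. Standalone userspace C;
 * the sample value and the main() harness are hypothetical.
 */
#if 0
#include <stdio.h>

#define RWSEM_WRITER_LOCKED     (1UL << 0)
#define RWSEM_FLAG_WAITERS      (1UL << 1)
#define RWSEM_FLAG_HANDOFF      (1UL << 2)
#define RWSEM_READER_SHIFT      8
#define RWSEM_READER_BIAS       (1UL << RWSEM_READER_SHIFT)
#define RWSEM_READER_MASK       (~(RWSEM_READER_BIAS - 1))

int main(void)
{
        /* Hypothetical count: three readers hold the lock, waiters queued. */
        unsigned long count = 3 * RWSEM_READER_BIAS | RWSEM_FLAG_WAITERS;

        printf("writer locked: %d\n", (int)(count & RWSEM_WRITER_LOCKED));
        printf("waiters:       %d\n", !!(count & RWSEM_FLAG_WAITERS));
        printf("handoff:       %d\n", !!(count & RWSEM_FLAG_HANDOFF));
        /* readfail bit is clear in this sample, so the mask is safe */
        printf("readers:       %lu\n",
               (count & RWSEM_READER_MASK) >> RWSEM_READER_SHIFT);
        return 0;
}
#endif
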
/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without lock. Read from owner, however,
 * may not need READ_ONCE() as long as the pointer value is only used
 * for comparison and isn't being dereferenced.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
        atomic_long_set(&sem->owner, (long)current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
        atomic_long_set(&sem->owner, 0);
}

/*
 * Test the flags in the owner field.
 */
static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
{
        return atomic_long_read(&sem->owner) & flags;
}

/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously; it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
 *
 * The reader non-spinnable bit is preserved.
 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
                                            struct task_struct *owner)
{
        unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
                (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);

        atomic_long_set(&sem->owner, val);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
        __rwsem_set_reader_owned(sem, current);
}

/*
 * Return true if the rwsem is owned by a reader.
 */
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
#ifdef CONFIG_DEBUG_RWSEMS
        /*
         * Check the count to see if it is write-locked.
         */
        long count = atomic_long_read(&sem->count);

        if (count & RWSEM_WRITER_MASK)
                return false;
#endif
        return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
}

#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
 * is a task pointer in owner of a reader-owned rwsem, it will be the
 * real owner or one of the real owners. The only exception is when the
 * unlock is done by up_read_non_owner().
 */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
        unsigned long val = atomic_long_read(&sem->owner);

        while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
                if (atomic_long_try_cmpxchg(&sem->owner, &val,
                                            val & RWSEM_OWNER_FLAGS_MASK))
                        return;
        }
}
#else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif

/*
 * Set the RWSEM_NONSPINNABLE bit if the RWSEM_READER_OWNED flag
 * remains set. Otherwise, the operation will be aborted.
 */
static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
{
        unsigned long owner = atomic_long_read(&sem->owner);

        do {
                if (!(owner & RWSEM_READER_OWNED))
                        break;
                if (owner & RWSEM_NONSPINNABLE)
                        break;
        } while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
                                          owner | RWSEM_NONSPINNABLE));
}

static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
{
        *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);

        if (WARN_ON_ONCE(*cntp < 0))
                rwsem_set_nonspinnable(sem);

        if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
                rwsem_set_reader_owned(sem);
                return true;
        }

        return false;
}

static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
{
        long tmp = RWSEM_UNLOCKED_VALUE;

        if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
                rwsem_set_owner(sem);
                return true;
        }

        return false;
}

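/*
 * Illustrative sketch, not part of the kernel build: the shape of the two
 * trylock fast paths above, modelled with C11 atomics in userspace. The
 * readfail bit is omitted for brevity, and on failure this model simply
 * backs the bias out where the kernel would enter the slowpath instead.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

#define RWSEM_WRITER_LOCKED     (1L << 0)
#define RWSEM_FLAG_WAITERS      (1L << 1)
#define RWSEM_FLAG_HANDOFF      (1L << 2)
#define RWSEM_READER_BIAS       (1L << 8)
#define RWSEM_READ_FAILED_MASK  (RWSEM_WRITER_LOCKED | RWSEM_FLAG_WAITERS | \
                                 RWSEM_FLAG_HANDOFF)

static _Atomic long count;

/* Reader fast path: unconditionally add the bias, then check the result. */
static bool read_trylock_model(void)
{
        long c = atomic_fetch_add_explicit(&count, RWSEM_READER_BIAS,
                                           memory_order_acquire)
                        + RWSEM_READER_BIAS;

        if (!(c & RWSEM_READ_FAILED_MASK))
                return true;            /* got the read lock */
        atomic_fetch_sub_explicit(&count, RWSEM_READER_BIAS,
                                  memory_order_relaxed);
        return false;                   /* slowpath territory */
}

/* Writer fast path: a single compare-and-swap from the unlocked state. */
static bool write_trylock_model(void)
{
        long expected = 0;              /* RWSEM_UNLOCKED_VALUE */

        return atomic_compare_exchange_strong_explicit(&count, &expected,
                        RWSEM_WRITER_LOCKED,
                        memory_order_acquire, memory_order_relaxed);
}
#endif
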
/*
 * Return just the real task structure pointer of the owner
 */
static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
{
        return (struct task_struct *)
                (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Return the real task structure pointer of the owner and the embedded
 * flags in the owner. pflags must be non-NULL.
 */
static inline struct task_struct *
rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
{
        unsigned long owner = atomic_long_read(&sem->owner);

        *pflags = owner & RWSEM_OWNER_FLAGS_MASK;
        return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
}

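/*
 * Illustrative sketch, not part of the kernel build: how a raw owner word
 * splits into task pointer and flag bits, mirroring rwsem_owner_flags()
 * above. Standalone userspace C; the sample address is hypothetical.
 */
#if 0
#include <stdio.h>

#define RWSEM_READER_OWNED      (1UL << 0)
#define RWSEM_NONSPINNABLE      (1UL << 1)
#define RWSEM_OWNER_FLAGS_MASK  (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)

int main(void)
{
        /* Hypothetical owner word: task at 0x1000, reader-owned, spinnable.
         * Task pointers are at least word-aligned, which is what frees the
         * two low bits for use as flags. */
        unsigned long owner = 0x1000UL | RWSEM_READER_OWNED;
        unsigned long flags = owner & RWSEM_OWNER_FLAGS_MASK;
        unsigned long task  = owner & ~RWSEM_OWNER_FLAGS_MASK;

        printf("task 0x%lx reader_owned %lu nonspinnable %lu\n", task,
               flags & RWSEM_READER_OWNED,
               (flags & RWSEM_NONSPINNABLE) >> 1);
        return 0;
}
#endif
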
/*
 * Guide to the rw_semaphore's count field.
 *
 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
 * by a writer.
 *
 * The lock is owned by readers when
 * (1) the RWSEM_WRITER_LOCKED isn't set in count,
 * (2) some of the reader bits are set in count, and
 * (3) the owner field has the RWSEM_READER_OWNED bit set.
 *
 * Having some reader bits set is not enough to guarantee a reader-owned
 * lock as the readers may be in the process of backing out from the count
 * and a writer has just released the lock. So another writer may steal
 * the lock immediately after that.
 */

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
#ifdef CONFIG_DEBUG_RWSEMS
        sem->magic = sem;
#endif
        atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
        atomic_long_set(&sem->owner, 0L);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        osq_lock_init(&sem->osq);
#endif
}
EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
        unsigned long timeout;
        bool handoff_set;
};
#define rwsem_first_waiter(sem) \
        list_first_entry(&sem->wait_list, struct rwsem_waiter, list)

enum rwsem_wake_type {
        RWSEM_WAKE_ANY,         /* Wake whatever's at head of wait list */
        RWSEM_WAKE_READERS,     /* Wake readers only */
        RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
};

/*
 * The typical HZ value is either 250 or 1000. So set the minimum waiting
 * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
 * queue before initiating the handoff protocol.
 */
#define RWSEM_WAIT_TIMEOUT      DIV_ROUND_UP(HZ, 250)

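/*
 * Illustrative sketch, not part of the kernel build: a quick check of the
 * timeout arithmetic above for common HZ values, in standalone userspace C.
 */
#if 0
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        int hzs[] = { 100, 250, 300, 1000 };

        for (int i = 0; i < 4; i++) {
                int hz = hzs[i];
                int j = DIV_ROUND_UP(hz, 250); /* RWSEM_WAIT_TIMEOUT */

                /* HZ=250 -> 1 jiffy (4ms), HZ=1000 -> 4 jiffies (4ms),
                 * HZ=100 -> 1 jiffy (10ms): at least 4ms or 1 jiffy. */
                printf("HZ=%4d -> %d jiffy(ies) = %d ms\n", hz, j,
                       j * 1000 / hz);
        }
        return 0;
}
#endif
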
/*
 * Magic number to batch-wakeup waiting readers, even when writers are
 * also present in the queue. This both limits the amount of work the
 * waking thread must do and also prevents any potential counter overflow,
 * however unlikely.
 */
#define MAX_READERS_WAKEUP      0x100

static inline void
rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
{
        lockdep_assert_held(&sem->wait_lock);
        list_add_tail(&waiter->list, &sem->wait_list);
        /* caller will set RWSEM_FLAG_WAITERS */
}

/*
 * Remove a waiter from the wait_list and clear flags.
 *
 * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
 * this function. Modify with care.
 *
 * Return: true if wait_list isn't empty and false otherwise
 */
static inline bool
rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
{
        lockdep_assert_held(&sem->wait_lock);
        list_del(&waiter->list);
        if (likely(!list_empty(&sem->wait_list)))
                return true;

        atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
        return false;
}

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
 *   have been set.
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wakeup the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having their
 *   task pointers zeroed
 * - writers are only marked woken if downgrading is false
 *
 * Implies rwsem_del_waiter() for all woken readers.
 */
static void rwsem_mark_wake(struct rw_semaphore *sem,
                            enum rwsem_wake_type wake_type,
                            struct wake_q_head *wake_q)
{
        struct rwsem_waiter *waiter, *tmp;
        long oldcount, woken = 0, adjustment = 0;
        struct list_head wlist;

        lockdep_assert_held(&sem->wait_lock);

        /*
         * Take a peek at the queue head waiter such that we can determine
         * the wakeup(s) to perform.
         */
        waiter = rwsem_first_waiter(sem);

        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wake_type == RWSEM_WAKE_ANY) {
                        /*
                         * Mark writer at the front of the queue for wakeup.
                         * Until the task is actually awoken later by the
                         * caller, other writers are able to steal it.
                         * Readers, on the other hand, will block as they
                         * will notice the queued writer.
                         */
                        wake_q_add(wake_q, waiter->task);
                        lockevent_inc(rwsem_wake_writer);
                }

                return;
        }

        /*
         * No reader wakeup if there are too many of them already.
         */
        if (unlikely(atomic_long_read(&sem->count) < 0))
                return;

        /*
         * Writers might steal the lock before we grant it to the next reader.
         * We prefer to do the first reader grant before counting readers
         * so we can bail out early if a writer stole the lock.
         */
        if (wake_type != RWSEM_WAKE_READ_OWNED) {
                struct task_struct *owner;

                adjustment = RWSEM_READER_BIAS;
                oldcount = atomic_long_fetch_add(adjustment, &sem->count);
                if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
                        /*
                         * When we've been waiting "too" long (for writers
                         * to give up the lock), request a HANDOFF to
                         * force the issue.
                         */
                        if (time_after(jiffies, waiter->timeout)) {
                                if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
                                        adjustment -= RWSEM_FLAG_HANDOFF;
                                        lockevent_inc(rwsem_rlock_handoff);
                                }
                                waiter->handoff_set = true;
                        }

                        atomic_long_add(-adjustment, &sem->count);
                        return;
                }
                /*
                 * Set it to reader-owned to give spinners an early
                 * indication that readers now have the lock.
                 * The reader nonspinnable bit seen at slowpath entry of
                 * the reader is copied over.
                 */
                owner = waiter->task;
                __rwsem_set_reader_owned(sem, owner);
        }

        /*
         * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
         * queue. We know that the number woken will be at least 1 as we
         * accounted for above. Note we increment the 'active part' of the
         * count by the number of readers before waking any processes up.
         *
         * This is an adaptation of the phase-fair R/W locks where at the
         * reader phase (first waiter is a reader), all readers are eligible
         * to acquire the lock at the same time irrespective of their order
         * in the queue. The writers acquire the lock according to their
         * order in the queue.
         *
         * We have to do wakeup in 2 passes to prevent the possibility that
         * the reader count may be decremented before it is incremented. This
         * is because the to-be-woken waiter may not have slept yet. So it
         * may see waiter->task got cleared, finish its critical section and
         * do an unlock before the reader count increment.
         *
         * 1) Collect the read-waiters in a separate list, count them and
         *    fully increment the reader count in rwsem.
         * 2) For each waiter in the new list, clear waiter->task and
         *    put them into wake_q to be woken up later.
         */
        INIT_LIST_HEAD(&wlist);
        list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
                if (waiter->type == RWSEM_WAITING_FOR_WRITE)
                        continue;

                woken++;
                list_move_tail(&waiter->list, &wlist);

                /*
                 * Limit # of readers that can be woken up per wakeup call.
                 */
                if (unlikely(woken >= MAX_READERS_WAKEUP))
                        break;
        }

        adjustment = woken * RWSEM_READER_BIAS - adjustment;
        lockevent_cond_inc(rwsem_wake_reader, woken);

        oldcount = atomic_long_read(&sem->count);
        if (list_empty(&sem->wait_list)) {
                /*
                 * Combined with list_move_tail() above, this implies
                 * rwsem_del_waiter().
                 */
                adjustment -= RWSEM_FLAG_WAITERS;
                if (oldcount & RWSEM_FLAG_HANDOFF)
                        adjustment -= RWSEM_FLAG_HANDOFF;
        } else if (woken) {
                /*
                 * When we've woken a reader, we no longer need to force
                 * writers to give up the lock and we can clear HANDOFF.
                 */
                if (oldcount & RWSEM_FLAG_HANDOFF)
                        adjustment -= RWSEM_FLAG_HANDOFF;
        }

        if (adjustment)
                atomic_long_add(adjustment, &sem->count);

        /* 2nd pass */
        list_for_each_entry_safe(waiter, tmp, &wlist, list) {
                struct task_struct *tsk;

                tsk = waiter->task;
                get_task_struct(tsk);

                /*
                 * Ensure calling get_task_struct() before setting the reader
                 * waiter to nil such that rwsem_down_read_slowpath() cannot
                 * race with do_exit() by always holding a reference count
                 * to the task to wakeup.
                 */
                smp_store_release(&waiter->task, NULL);
                /*
                 * Ensure issuing the wakeup (either by us or someone else)
                 * after setting the reader waiter to nil.
                 */
                wake_q_add_safe(wake_q, tsk);
        }
}

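/*
 * Illustrative sketch, not part of the kernel build: the release/acquire
 * handshake on waiter->task that makes the 2nd pass above safe, modelled
 * with C11 atomics. The struct and function names are made up; the task
 * reference (get_task_struct()) must be taken before the release store.
 */
#if 0
#include <stdatomic.h>
#include <stddef.h>

struct task;                            /* stand-in for task_struct */

struct waiter_model {
        struct task *_Atomic task;
};

/* Waker side, as in the 2nd pass of rwsem_mark_wake(): pin, then publish. */
static struct task *mark_wake_model(struct waiter_model *w)
{
        struct task *tsk = atomic_load_explicit(&w->task,
                                                memory_order_relaxed);

        /* get_task_struct(tsk) would go here, before the release store */
        atomic_store_explicit(&w->task, NULL, memory_order_release);
        return tsk;                     /* then wake_q_add_safe(wake_q, tsk) */
}

/* Sleeper side, as in rwsem_down_read_slowpath(): poll with acquire. */
static int grant_seen_model(struct waiter_model *w)
{
        /* NULL means the lock was granted; acquire pairs with the release
         * store above so everything done before the grant is visible. */
        return atomic_load_explicit(&w->task, memory_order_acquire) == NULL;
}
#endif
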
/*
 * Remove a waiter and try to wake up other waiters in the wait queue.
 * This function is called from the out_nolock path of both the reader and
 * writer slowpaths with wait_lock held. It releases the wait_lock and
 * optionally wakes up waiters before it returns.
 */
static inline void
rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
                      struct wake_q_head *wake_q)
                      __releases(&sem->wait_lock)
{
        bool first = rwsem_first_waiter(sem) == waiter;

        wake_q_init(wake_q);

        /*
         * If the wait_list isn't empty and the waiter to be deleted is
         * the first waiter, we wake up the remaining waiters as they may
         * be eligible to acquire or spin on the lock.
         */
        if (rwsem_del_waiter(sem, waiter) && first)
                rwsem_mark_wake(sem, RWSEM_WAKE_ANY, wake_q);
        raw_spin_unlock_irq(&sem->wait_lock);
        if (!wake_q_empty(wake_q))
                wake_up_q(wake_q);
}

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 *
 * Implies rwsem_del_waiter() on success.
 */
static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
                                        struct rwsem_waiter *waiter)
{
        struct rwsem_waiter *first = rwsem_first_waiter(sem);
        long count, new;

        lockdep_assert_held(&sem->wait_lock);

        count = atomic_long_read(&sem->count);
        do {
                bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);

                if (has_handoff) {
                        /*
                         * Honor handoff bit and yield only when the first
                         * waiter is the one that set it. Otherwise, we
                         * still try to acquire the rwsem.
                         */
                        if (first->handoff_set && (waiter != first))
                                return false;

                        /*
                         * First waiter can inherit a previously set handoff
                         * bit and spin on rwsem if lock acquisition fails.
                         */
                        if (waiter == first)
                                waiter->handoff_set = true;
                }

                new = count;

                if (count & RWSEM_LOCK_MASK) {
                        if (has_handoff || (!rt_task(waiter->task) &&
                                            !time_after(jiffies, waiter->timeout)))
                                return false;

                        new |= RWSEM_FLAG_HANDOFF;
                } else {
                        new |= RWSEM_WRITER_LOCKED;
                        new &= ~RWSEM_FLAG_HANDOFF;

                        if (list_is_singular(&sem->wait_list))
                                new &= ~RWSEM_FLAG_WAITERS;
                }
        } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));

        /*
         * We have either acquired the lock with handoff bit cleared or
         * set the handoff bit.
         */
        if (new & RWSEM_FLAG_HANDOFF) {
                waiter->handoff_set = true;
                lockevent_inc(rwsem_wlock_handoff);
                return false;
        }

        /*
         * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on
         * success.
         */
        list_del(&waiter->list);
        rwsem_set_owner(sem);
        return true;
}

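/*
 * Illustrative sketch, not part of the kernel build: the count transition
 * at the heart of rwsem_try_write_lock() above, modelled with C11 atomics.
 * The first-waiter and handoff_set bookkeeping is omitted; timed_out and
 * last_waiter stand in for the rt_task()/timeout and list checks.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

#define RWSEM_WRITER_LOCKED     (1L << 0)
#define RWSEM_FLAG_WAITERS      (1L << 1)
#define RWSEM_FLAG_HANDOFF      (1L << 2)
#define RWSEM_READER_BIAS       (1L << 8)
#define RWSEM_LOCK_MASK         (RWSEM_WRITER_LOCKED | ~(RWSEM_READER_BIAS - 1))

static _Atomic long count;

static bool try_write_lock_model(bool timed_out, bool last_waiter)
{
        long c = atomic_load_explicit(&count, memory_order_relaxed);
        long new;

        do {
                new = c;
                if (c & RWSEM_LOCK_MASK) {      /* still read- or write-locked */
                        if ((c & RWSEM_FLAG_HANDOFF) || !timed_out)
                                return false;
                        new |= RWSEM_FLAG_HANDOFF;      /* claim the handoff */
                } else {
                        new |= RWSEM_WRITER_LOCKED;     /* take the lock ... */
                        new &= ~RWSEM_FLAG_HANDOFF;     /* ... consume handoff */
                        if (last_waiter)
                                new &= ~RWSEM_FLAG_WAITERS;
                }
        } while (!atomic_compare_exchange_weak_explicit(&count, &c, new,
                        memory_order_acquire, memory_order_relaxed));

        return !(new & RWSEM_FLAG_HANDOFF);     /* false: only set handoff */
}
#endif
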
/*
 * The rwsem_spin_on_owner() function returns the following 4 values
 * depending on the lock owner state.
 *   OWNER_NULL  : owner is currently NULL
 *   OWNER_WRITER: when owner changes and is a writer
 *   OWNER_READER: when owner changes and the new owner may be a reader.
 *   OWNER_NONSPINNABLE:
 *                 when optimistic spinning has to stop because either the
 *                 owner stops running, is unknown, or its timeslice has
 *                 been used up.
 */
enum owner_state {
        OWNER_NULL              = 1 << 0,
        OWNER_WRITER            = 1 << 1,
        OWNER_READER            = 1 << 2,
        OWNER_NONSPINNABLE      = 1 << 3,
};

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
        long count = atomic_long_read(&sem->count);

        while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
                if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
                                        count | RWSEM_WRITER_LOCKED)) {
                        rwsem_set_owner(sem);
                        lockevent_inc(rwsem_opt_lock);
                        return true;
                }
        }
        return false;
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
        struct task_struct *owner;
        unsigned long flags;
        bool ret = true;

        if (need_resched()) {
                lockevent_inc(rwsem_opt_fail);
                return false;
        }

        preempt_disable();
        /*
         * Disabling preemption is equivalent to an RCU read-side critical
         * section, thus the task_struct won't go away.
         */
        owner = rwsem_owner_flags(sem, &flags);
        /*
         * Don't check the read-owner as the entry may be stale.
         */
        if ((flags & RWSEM_NONSPINNABLE) ||
            (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
                ret = false;
        preempt_enable();

        lockevent_cond_inc(rwsem_opt_fail, !ret);
        return ret;
}

#define OWNER_SPINNABLE         (OWNER_NULL | OWNER_WRITER | OWNER_READER)

static inline enum owner_state
rwsem_owner_state(struct task_struct *owner, unsigned long flags)
{
        if (flags & RWSEM_NONSPINNABLE)
                return OWNER_NONSPINNABLE;

        if (flags & RWSEM_READER_OWNED)
                return OWNER_READER;

        return owner ? OWNER_WRITER : OWNER_NULL;
}

static noinline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
{
        struct task_struct *new, *owner;
        unsigned long flags, new_flags;
        enum owner_state state;

        lockdep_assert_preemption_disabled();

        owner = rwsem_owner_flags(sem, &flags);
        state = rwsem_owner_state(owner, flags);
        if (state != OWNER_WRITER)
                return state;

        for (;;) {
                /*
                 * When a waiting writer set the handoff flag, it may spin
                 * on the owner as well. Once that writer acquires the lock,
                 * we can spin on it. So we don't need to quit even when the
                 * handoff bit is set.
                 */
                new = rwsem_owner_flags(sem, &new_flags);
                if ((new != owner) || (new_flags != flags)) {
                        state = rwsem_owner_state(new, new_flags);
                        break;
                }

                /*
                 * Ensure we emit the owner->on_cpu dereference _after_
                 * checking that sem->owner still matches owner. If that
                 * fails, owner might point to free()d memory. If it still
                 * matches, our spinning context has already disabled
                 * preemption, which is equivalent to an RCU read-side
                 * critical section and ensures the memory stays valid.
                 */
                barrier();

                if (need_resched() || !owner_on_cpu(owner)) {
                        state = OWNER_NONSPINNABLE;
                        break;
                }

                cpu_relax();
        }

        return state;
}

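/*
 * Illustrative sketch, not part of the kernel build: the shape of the wait
 * loop in rwsem_spin_on_owner() above. A userspace model that spins while
 * an (owner, flags) word stays unchanged; should_stop stands in for the
 * need_resched()/owner_on_cpu() checks and sched_yield() for cpu_relax().
 */
#if 0
#include <stdatomic.h>
#include <sched.h>

static unsigned long spin_on_word_model(_Atomic unsigned long *word,
                                        int (*should_stop)(void))
{
        unsigned long old = atomic_load_explicit(word, memory_order_relaxed);

        for (;;) {
                unsigned long cur = atomic_load_explicit(word,
                                                memory_order_relaxed);

                if (cur != old)
                        return cur;     /* owner or flags changed: re-evaluate */
                if (should_stop())
                        return old;     /* would report OWNER_NONSPINNABLE */
                sched_yield();          /* cpu_relax() stand-in */
        }
}
#endif
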
/*
 * Calculate reader-owned rwsem spinning threshold for writer
 *
 * The more readers own the rwsem, the longer it will take for them to
 * wind down and free the rwsem. So the empirical formula used to
 * determine the actual spinning time limit here is:
 *
 *   Spinning threshold = (10 + nr_readers/2)us
 *
 * The limit is capped to a maximum of 25us (30 readers). This is just
 * a heuristic and is subject to change in the future.
 */
static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
{
        long count = atomic_long_read(&sem->count);
        int readers = count >> RWSEM_READER_SHIFT;
        u64 delta;

        if (readers > 30)
                readers = 30;
        delta = (20 + readers) * NSEC_PER_USEC / 2;

        return sched_clock() + delta;
}

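/*
 * Illustrative sketch, not part of the kernel build: worked numbers for the
 * spinning-threshold formula above, in standalone userspace C.
 */
#if 0
#include <stdio.h>

#define NSEC_PER_USEC   1000ULL

int main(void)
{
        /* (10 + readers/2) us, computed as (20 + readers) * 1000 / 2 ns,
         * capped at 30 readers: 1 -> 10.5us, 8 -> 14us, 30+ -> 25us. */
        int samples[] = { 1, 8, 30, 100 };

        for (int i = 0; i < 4; i++) {
                int readers = samples[i] > 30 ? 30 : samples[i];
                unsigned long long delta = (20 + readers) * NSEC_PER_USEC / 2;

                printf("%3d reader(s) -> spin budget %llu ns\n",
                       samples[i], delta);
        }
        return 0;
}
#endif
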
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        bool taken = false;
        int prev_owner_state = OWNER_NULL;
        int loop = 0;
        u64 rspin_threshold = 0;

        preempt_disable();

        /* sem->wait_lock should not be held when doing optimistic spinning */
        if (!osq_lock(&sem->osq))
                goto done;

        /*
         * Optimistically spin on the owner field and attempt to acquire the
         * lock whenever the owner changes. Spinning will be stopped when:
         *  1) the owning writer isn't running; or
         *  2) readers own the lock and spinning time has exceeded limit.
         */
        for (;;) {
                enum owner_state owner_state;

                owner_state = rwsem_spin_on_owner(sem);
                if (!(owner_state & OWNER_SPINNABLE))
                        break;

                /*
                 * Try to acquire the lock
                 */
                taken = rwsem_try_write_lock_unqueued(sem);

                if (taken)
                        break;

                /*
                 * Time-based reader-owned rwsem optimistic spinning
                 */
                if (owner_state == OWNER_READER) {
                        /*
                         * Re-initialize rspin_threshold every time when
                         * the owner state changes from non-reader to reader.
                         * This allows a writer to steal the lock in between
                         * 2 reader phases and have the threshold reset at
                         * the beginning of the 2nd reader phase.
                         */
                        if (prev_owner_state != OWNER_READER) {
                                if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
                                        break;
                                rspin_threshold = rwsem_rspin_threshold(sem);
                                loop = 0;
                        }

                        /*
                         * Check time threshold once every 16 iterations to
                         * avoid calling sched_clock() too frequently so
                         * as to reduce the average latency between the times
                         * when the lock becomes free and when the spinner
                         * is ready to do a trylock.
                         */
                        else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
                                rwsem_set_nonspinnable(sem);
                                lockevent_inc(rwsem_opt_nospin);
                                break;
                        }
                }

                /*
                 * An RT task cannot do optimistic spinning if it cannot
                 * be sure the lock holder is running or live-lock may
                 * happen if the current task and the lock holder happen
                 * to run on the same CPU. However, aborting optimistic
                 * spinning while a NULL owner is detected may miss some
                 * opportunity where spinning can continue without causing
                 * problem.
                 *
                 * There are 2 possible cases where an RT task may be able
                 * to continue spinning.
                 *
                 * 1) The lock owner is in the process of releasing the
                 *    lock, sem->owner is cleared but the lock has not
                 *    been released yet.
                 * 2) The lock was free and owner cleared, but another
                 *    task just comes in and acquires the lock before
                 *    we try to get it. The new owner may be a spinnable
                 *    writer.
                 *
                 * To take advantage of the two scenarios listed above, the
                 * RT task is made to retry one more time to see if it can
                 * acquire the lock or continue spinning on the new owning
                 * writer. Of course, if the time lag is long enough or the
                 * new owner is not a writer or spinnable, the RT task will
                 * quit spinning.
                 *
                 * If the owner is a writer, the need_resched() check is
                 * done inside rwsem_spin_on_owner(). If the owner is not
                 * a writer, the need_resched() check needs to be done here.
                 */
                if (owner_state != OWNER_WRITER) {
                        if (need_resched())
                                break;
                        if (rt_task(current) &&
                           (prev_owner_state != OWNER_WRITER))
                                break;
                }
                prev_owner_state = owner_state;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                cpu_relax();
        }
        osq_unlock(&sem->osq);
done:
        preempt_enable();
        lockevent_cond_inc(rwsem_opt_fail, !taken);
        return taken;
}

/*
 * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
 * only be called when the reader count reaches 0.
 */
static inline void clear_nonspinnable(struct rw_semaphore *sem)
{
        if (unlikely(rwsem_test_oflags(sem, RWSEM_NONSPINNABLE)))
                atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
}

#else
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
        return false;
}

static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        return false;
}

static inline void clear_nonspinnable(struct rw_semaphore *sem) { }

static inline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
{
        return OWNER_NONSPINNABLE;
}
#endif

/*
 * Prepare to wake up waiter(s) in the wait queue by putting them into the
 * given wake_q if the rwsem lock owner isn't a writer. If rwsem is likely
 * reader-owned, wake up read lock waiters in queue front or wake up any
 * front waiter otherwise.
 *
 * This is being called from both reader and writer slow paths.
 */
static inline void rwsem_cond_wake_waiter(struct rw_semaphore *sem, long count,
                                          struct wake_q_head *wake_q)
{
        enum rwsem_wake_type wake_type;

        if (count & RWSEM_WRITER_MASK)
                return;

        if (count & RWSEM_READER_MASK) {
                wake_type = RWSEM_WAKE_READERS;
        } else {
                wake_type = RWSEM_WAKE_ANY;
                clear_nonspinnable(sem);
        }
        rwsem_mark_wake(sem, wake_type, wake_q);
}

/*
 * Wait for the read lock to be granted
 */
static struct rw_semaphore __sched *
rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
{
        long adjustment = -RWSEM_READER_BIAS;
        long rcnt = (count >> RWSEM_READER_SHIFT);
        struct rwsem_waiter waiter;
        DEFINE_WAKE_Q(wake_q);

        /*
         * To prevent a constant stream of readers from starving a sleeping
         * waiter, don't attempt optimistic lock stealing if the lock is
         * currently owned by readers.
         */
        if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
            (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
                goto queue;

        /*
         * Reader optimistic lock stealing.
         */
        if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
                rwsem_set_reader_owned(sem);
                lockevent_inc(rwsem_rlock_steal);

                /*
                 * Wake up other readers in the wait queue if it is
                 * the first reader.
                 */
                if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
                        raw_spin_lock_irq(&sem->wait_lock);
                        if (!list_empty(&sem->wait_list))
                                rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
                                                &wake_q);
                        raw_spin_unlock_irq(&sem->wait_lock);
                        wake_up_q(&wake_q);
                }
                return sem;
        }

queue:
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_READ;
        waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
        waiter.handoff_set = false;

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list)) {
                /*
                 * In case the wait queue is empty and the lock isn't owned
                 * by a writer, this reader can exit the slowpath and return
                 * immediately as its RWSEM_READER_BIAS has already been set
                 * in the count.
                 */
                if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) {
                        /* Provide lock ACQUIRE */
                        smp_acquire__after_ctrl_dep();
                        raw_spin_unlock_irq(&sem->wait_lock);
                        rwsem_set_reader_owned(sem);
                        lockevent_inc(rwsem_rlock_fast);
                        return sem;
                }
                adjustment += RWSEM_FLAG_WAITERS;
        }
        rwsem_add_waiter(sem, &waiter);

        /* we're now waiting on the lock, but no longer actively locking */
        count = atomic_long_add_return(adjustment, &sem->count);

        rwsem_cond_wake_waiter(sem, count, &wake_q);
        raw_spin_unlock_irq(&sem->wait_lock);

        if (!wake_q_empty(&wake_q))
                wake_up_q(&wake_q);

        trace_contention_begin(sem, LCB_F_READ);

        /* wait to be given the lock */
        for (;;) {
                set_current_state(state);
                if (!smp_load_acquire(&waiter.task)) {
                        /* Matches rwsem_mark_wake()'s smp_store_release(). */
                        break;
                }
                if (signal_pending_state(state, current)) {
                        raw_spin_lock_irq(&sem->wait_lock);
                        if (waiter.task)
                                goto out_nolock;
                        raw_spin_unlock_irq(&sem->wait_lock);
                        /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
                        break;
                }
                schedule();
                lockevent_inc(rwsem_sleep_reader);
        }

        __set_current_state(TASK_RUNNING);
        lockevent_inc(rwsem_rlock);
        trace_contention_end(sem, 0);
        return sem;

out_nolock:
        rwsem_del_wake_waiter(sem, &waiter, &wake_q);
        __set_current_state(TASK_RUNNING);
        lockevent_inc(rwsem_rlock_fail);
        trace_contention_end(sem, -EINTR);
        return ERR_PTR(-EINTR);
}

/*
 * Wait until we successfully acquire the write lock
 */
static struct rw_semaphore __sched *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
{
        struct rwsem_waiter waiter;
        DEFINE_WAKE_Q(wake_q);

        /* do optimistic spinning and steal lock if possible */
        if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
                /* rwsem_optimistic_spin() implies ACQUIRE on success */
                return sem;
        }

        /*
         * Optimistic spinning failed, proceed to the slowpath
         * and block until we can acquire the sem.
         */
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_WRITE;
        waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
        waiter.handoff_set = false;

        raw_spin_lock_irq(&sem->wait_lock);
        rwsem_add_waiter(sem, &waiter);

        /* we're now waiting on the lock */
        if (rwsem_first_waiter(sem) != &waiter) {
                rwsem_cond_wake_waiter(sem, atomic_long_read(&sem->count),
                                       &wake_q);
                if (!wake_q_empty(&wake_q)) {
                        /*
                         * We want to minimize wait_lock hold time especially
                         * when a large number of readers are to be woken up.
                         */
                        raw_spin_unlock_irq(&sem->wait_lock);
                        wake_up_q(&wake_q);
                        raw_spin_lock_irq(&sem->wait_lock);
                }
        } else {
                atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
        }

        /* wait until we successfully acquire the lock */
        set_current_state(state);
        trace_contention_begin(sem, LCB_F_WRITE);

        for (;;) {
                if (rwsem_try_write_lock(sem, &waiter)) {
                        /* rwsem_try_write_lock() implies ACQUIRE on success */
                        break;
                }

                raw_spin_unlock_irq(&sem->wait_lock);

                if (signal_pending_state(state, current))
                        goto out_nolock;

                /*
                 * After setting the handoff bit and failing to acquire
                 * the lock, attempt to spin on owner to accelerate lock
                 * transfer. If the previous owner is an on-CPU writer and it
                 * has just released the lock, OWNER_NULL will be returned.
                 * In this case, we attempt to acquire the lock again
                 * without sleeping.
                 */
                if (waiter.handoff_set) {
                        enum owner_state owner_state;

                        preempt_disable();
                        owner_state = rwsem_spin_on_owner(sem);
                        preempt_enable();

                        if (owner_state == OWNER_NULL)
                                goto trylock_again;
                }

                schedule();
                lockevent_inc(rwsem_sleep_writer);
                set_current_state(state);
trylock_again:
                raw_spin_lock_irq(&sem->wait_lock);
        }
        __set_current_state(TASK_RUNNING);
        raw_spin_unlock_irq(&sem->wait_lock);
        lockevent_inc(rwsem_wlock);
        trace_contention_end(sem, 0);
        return sem;

out_nolock:
        __set_current_state(TASK_RUNNING);
        raw_spin_lock_irq(&sem->wait_lock);
        rwsem_del_wake_waiter(sem, &waiter, &wake_q);
        lockevent_inc(rwsem_wlock_fail);
        trace_contention_end(sem, -EINTR);
        return ERR_PTR(-EINTR);
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (!list_empty(&sem->wait_list))
                rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        wake_up_q(&wake_q);

        return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (!list_empty(&sem->wait_list))
                rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        wake_up_q(&wake_q);

        return sem;
}

/*
 * lock for reading
 */
static inline int __down_read_common(struct rw_semaphore *sem, int state)
{
        long count;

        if (!rwsem_read_trylock(sem, &count)) {
                if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
                        return -EINTR;
                DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
        }
        return 0;
}

static inline void __down_read(struct rw_semaphore *sem)
{
        __down_read_common(sem, TASK_UNINTERRUPTIBLE);
}

static inline int __down_read_interruptible(struct rw_semaphore *sem)
{
        return __down_read_common(sem, TASK_INTERRUPTIBLE);
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
        return __down_read_common(sem, TASK_KILLABLE);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        long tmp;

        DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);

        tmp = atomic_long_read(&sem->count);
        while (!(tmp & RWSEM_READ_FAILED_MASK)) {
                if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
                                                    tmp + RWSEM_READER_BIAS)) {
                        rwsem_set_reader_owned(sem);
                        return 1;
                }
        }
        return 0;
}

/*
 * lock for writing
 */
static inline int __down_write_common(struct rw_semaphore *sem, int state)
{
        if (unlikely(!rwsem_write_trylock(sem))) {
                if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
                        return -EINTR;
        }

        return 0;
}

static inline void __down_write(struct rw_semaphore *sem)
{
        __down_write_common(sem, TASK_UNINTERRUPTIBLE);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
        return __down_write_common(sem, TASK_KILLABLE);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
        return rwsem_write_trylock(sem);
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        long tmp;

        DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
        DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);

        rwsem_clear_reader_owned(sem);
        tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
        DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
        if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
                      RWSEM_FLAG_WAITERS)) {
                clear_nonspinnable(sem);
                rwsem_wake(sem);
        }
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        long tmp;

        DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
        /*
         * sem->owner may differ from current if the ownership is transferred
         * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
         */
        DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
                            !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);

        rwsem_clear_owner(sem);
        tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
        if (unlikely(tmp & RWSEM_FLAG_WAITERS))
                rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        long tmp;

        /*
         * When downgrading from exclusive to shared ownership,
         * anything inside the write-locked region cannot leak
         * into the read side. In contrast, anything in the
         * read-locked region is ok to be re-ordered into the
         * write side. As such, rely on RELEASE semantics.
         */
        DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
        tmp = atomic_long_fetch_add_release(
                -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
        rwsem_set_reader_owned(sem);
        if (tmp & RWSEM_FLAG_WAITERS)
                rwsem_downgrade_wake(sem);
}

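/*
 * Illustrative sketch, not part of the kernel build: the downgrade count
 * arithmetic above as a plain calculation. Swapping the writer bit for one
 * reader bias happens in a single atomic add, so the count is never
 * observed unlocked in between. Standalone userspace C.
 */
#if 0
#include <stdio.h>

#define RWSEM_WRITER_LOCKED     (1UL << 0)
#define RWSEM_READER_BIAS       (1UL << 8)

int main(void)
{
        unsigned long count = RWSEM_WRITER_LOCKED;      /* write-locked */

        count += -RWSEM_WRITER_LOCKED + RWSEM_READER_BIAS;
        printf("after downgrade: count = 0x%lx\n", count);      /* 0x100 */
        return 0;
}
#endif
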
#else /* !CONFIG_PREEMPT_RT */

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

#define rwbase_set_and_save_current_state(state)        \
        set_current_state(state)

#define rwbase_restore_current_state()                  \
        __set_current_state(TASK_RUNNING)

#define rwbase_rtmutex_lock_state(rtm, state)           \
        __rt_mutex_lock(rtm, state)

#define rwbase_rtmutex_slowlock_locked(rtm, state)      \
        __rt_mutex_slowlock_locked(rtm, NULL, state)

#define rwbase_rtmutex_unlock(rtm)                      \
        __rt_mutex_unlock(rtm)

#define rwbase_rtmutex_trylock(rtm)                     \
        __rt_mutex_trylock(rtm)

#define rwbase_signal_pending_state(state, current)     \
        signal_pending_state(state, current)

#define rwbase_schedule()                               \
        schedule()

#include "rwbase_rt.c"

void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
        init_rwbase_rt(&(sem)->rwbase);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
}
EXPORT_SYMBOL(__init_rwsem);

static inline void __down_read(struct rw_semaphore *sem)
{
        rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
}

static inline int __down_read_interruptible(struct rw_semaphore *sem)
{
        return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
        return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        return rwbase_read_trylock(&sem->rwbase);
}

static inline void __up_read(struct rw_semaphore *sem)
{
        rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
}

static inline void __sched __down_write(struct rw_semaphore *sem)
{
        rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
}

static inline int __sched __down_write_killable(struct rw_semaphore *sem)
{
        return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        return rwbase_write_trylock(&sem->rwbase);
}

static inline void __up_write(struct rw_semaphore *sem)
{
        rwbase_write_unlock(&sem->rwbase);
}

static inline void __downgrade_write(struct rw_semaphore *sem)
{
        rwbase_write_downgrade(&sem->rwbase);
}

/* Debug stubs for the common API */
#define DEBUG_RWSEMS_WARN_ON(c, sem)

static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
                                            struct task_struct *owner)
{
}

static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
        int count = atomic_read(&sem->rwbase.readers);

        return count < 0 && count != READER_BIAS;
}

#endif /* CONFIG_PREEMPT_RT */

/*
 * lock for reading
 */
void __sched down_read(struct rw_semaphore *sem)
{
        might_sleep();
        rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

        LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read);

int __sched down_read_interruptible(struct rw_semaphore *sem)
{
        might_sleep();
        rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

        if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
                rwsem_release(&sem->dep_map, _RET_IP_);
                return -EINTR;
        }

        return 0;
}
EXPORT_SYMBOL(down_read_interruptible);

int __sched down_read_killable(struct rw_semaphore *sem)
{
        might_sleep();
        rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

        if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
                rwsem_release(&sem->dep_map, _RET_IP_);
                return -EINTR;
        }

        return 0;
}
EXPORT_SYMBOL(down_read_killable);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int down_read_trylock(struct rw_semaphore *sem)
{
        int ret = __down_read_trylock(sem);

        if (ret == 1)
                rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
        return ret;
}
EXPORT_SYMBOL(down_read_trylock);

/*
 * lock for writing
 */
void __sched down_write(struct rw_semaphore *sem)
{
        might_sleep();
        rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write);

/*
 * lock for writing
 */
int __sched down_write_killable(struct rw_semaphore *sem)
{
        might_sleep();
        rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

        if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
                                  __down_write_killable)) {
                rwsem_release(&sem->dep_map, _RET_IP_);
                return -EINTR;
        }

        return 0;
}
EXPORT_SYMBOL(down_write_killable);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int down_write_trylock(struct rw_semaphore *sem)
{
        int ret = __down_write_trylock(sem);

        if (ret == 1)
                rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);

        return ret;
}
EXPORT_SYMBOL(down_write_trylock);

/*
 * release a read lock
 */
void up_read(struct rw_semaphore *sem)
{
        rwsem_release(&sem->dep_map, _RET_IP_);
        __up_read(sem);
}
EXPORT_SYMBOL(up_read);

/*
 * release a write lock
 */
void up_write(struct rw_semaphore *sem)
{
        rwsem_release(&sem->dep_map, _RET_IP_);
        __up_write(sem);
}
EXPORT_SYMBOL(up_write);

/*
 * downgrade write lock to read lock
 */
void downgrade_write(struct rw_semaphore *sem)
{
        lock_downgrade(&sem->dep_map, _RET_IP_);
        __downgrade_write(sem);
}
EXPORT_SYMBOL(downgrade_write);

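/*
 * Illustrative sketch, not part of the kernel build: hypothetical usage of
 * the public API above. A writer rebuilds a cache, then downgrades so that
 * late-arriving readers are not blocked needlessly. my_cache_sem,
 * my_cache_build() and my_cache_lookup() are made-up names.
 */
#if 0
#include <linux/rwsem.h>

extern void my_cache_build(void);
extern int my_cache_lookup(int key);

static DECLARE_RWSEM(my_cache_sem);

static int my_cache_refresh_and_read(int key)
{
        int val;

        down_write(&my_cache_sem);      /* exclusive: rebuild the cache */
        my_cache_build();
        downgrade_write(&my_cache_sem); /* keep the lock, now shared */
        val = my_cache_lookup(key);     /* concurrent readers are fine here */
        up_read(&my_cache_sem);         /* downgraded lock ends with up_read() */

        return val;
}
#endif
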
#ifdef CONFIG_DEBUG_LOCK_ALLOC

void down_read_nested(struct rw_semaphore *sem, int subclass)
{
        might_sleep();
        rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read_nested);

int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
{
        might_sleep();
        rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

        if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
                rwsem_release(&sem->dep_map, _RET_IP_);
                return -EINTR;
        }

        return 0;
}
EXPORT_SYMBOL(down_read_killable_nested);

void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
        might_sleep();
        rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
        LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(_down_write_nest_lock);

void down_read_non_owner(struct rw_semaphore *sem)
{
        might_sleep();
        __down_read(sem);
        __rwsem_set_reader_owned(sem, NULL);
}
EXPORT_SYMBOL(down_read_non_owner);

void down_write_nested(struct rw_semaphore *sem, int subclass)
{
        might_sleep();
        rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write_nested);

int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
{
        might_sleep();
        rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

        if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
                                  __down_write_killable)) {
                rwsem_release(&sem->dep_map, _RET_IP_);
                return -EINTR;
        }

        return 0;
}
EXPORT_SYMBOL(down_write_killable_nested);

void up_read_non_owner(struct rw_semaphore *sem)
{
        DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
        __up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);

#endif