linux/lib/rwsem-spinlock.c
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001   David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/export.h>

enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

/*
 * report whether the semaphore is currently locked
 * - if the wait_lock cannot be taken, someone is in the middle of an
 *   operation on the semaphore, so it is reported as locked
 */
int rwsem_is_locked(struct rw_semaphore *sem)
{
        int ret = 1;
        unsigned long flags;

        if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
                ret = (sem->activity != 0);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        }
        return ret;
}
EXPORT_SYMBOL(rwsem_is_locked);
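
/*
 * Example (sketch): this is mostly useful for assertions that a lock is
 * held, e.g. the classic mmap_sem check (mm is illustrative here, not
 * part of this file):
 *
 *      BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 *
 * The result is only a snapshot: the state may change again as soon as
 * wait_lock is dropped.
 */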

/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->activity = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);
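
/*
 * Example (sketch): callers normally go through the init_rwsem() macro
 * or DECLARE_RWSEM() from <linux/rwsem.h>, which supply the name and a
 * static lock_class_key for lockdep (my_sem is an illustrative name):
 *
 *      static DECLARE_RWSEM(my_sem);
 *
 * or, for a dynamically allocated semaphore:
 *
 *      init_rwsem(&some_struct->sem);
 */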

/*
 * handle the lock being released when processes blocked on it can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken waiter blocks are discarded from the list after their task
 *   pointers have been zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        int woken;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wakewrite)
                        /* Wake up a writer. Note that we do not grant it the
                         * lock - it will have to acquire it when it runs. */
                        wake_up_process(waiter->task);
                goto out;
        }

        /* grant an infinite number of read locks to the front of the queue */
        woken = 0;
        do {
                struct list_head *next = waiter->list.next;

                list_del(&waiter->list);
                tsk = waiter->task;
                /*
                 * Finish touching the waiter block before zeroing its task
                 * pointer: once waiter->task is NULL, the woken reader may
                 * return and its on-stack waiter block may vanish.
                 */
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
                woken++;
                if (next == &sem->wait_list)
                        break;
                waiter = list_entry(next, struct rwsem_waiter, list);
        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);

        sem->activity += woken;

 out:
        return sem;
}
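
/*
 * Worked illustration: with a wait list of R0, R1, W0, R2 and the
 * active count at zero, the loop above grants and wakes R0 and R1,
 * leaves sem->activity == 2 and stops at W0; R2 stays queued behind W0
 * so FIFO ordering is preserved. If a writer is at the head and
 * wakewrite is zero, nothing is woken at all.
 */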

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        wake_up_process(waiter->task);

        return sem;
}
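
/*
 * Note that, as in __rwsem_do_wake(), the woken writer is not handed
 * the lock: it loops in __down_write_nested() and claims the lock
 * itself once it observes sem->activity == 0, which is what leaves the
 * door open for write lock stealing.
 */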

/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                goto out;
        }

        tsk = current;
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_READ;
        get_task_struct(tsk);

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we don't need to touch the semaphore struct anymore */
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        /* wait to be given the lock: the waker zeroes waiter.task once it
         * has granted us the lock */
        for (;;) {
                if (!waiter.task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;
 out:
        ;
}
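
/*
 * Example (sketch): this is reached through the down_read() wrapper in
 * kernel/rwsem.c, which adds the lockdep annotations:
 *
 *      down_read(&sem);
 *      ... read-side critical section ...
 *      up_read(&sem);
 */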

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}
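
/*
 * Example (sketch): via the down_read_trylock() wrapper, for contexts
 * that cannot afford to sleep on the semaphore:
 *
 *      if (down_read_trylock(&sem)) {
 *              ... read-side critical section ...
 *              up_read(&sem);
 *      }
 */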

/*
 * get a write lock on the semaphore
 */
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* set up my own style of waitqueue */
        tsk = current;
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_WRITE;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* wait for someone to release the lock */
        for (;;) {
                /*
                 * This is the key to supporting write lock stealing: it
                 * lets the task already on a CPU take the lock as soon as
                 * it becomes free, rather than putting itself to sleep and
                 * waiting for the system to wake it (or another task at
                 * the head of the wait list) up.
                 */
                if (sem->activity == 0)
                        break;
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }
        /* got the lock */
        sem->activity = -1;
        list_del(&waiter.list);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

void __sched __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}
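
/*
 * Note on stealing: a writer running the loop above may take the lock
 * ahead of a waiter at the head of the list if it sees
 * sem->activity == 0 while holding wait_lock. Granted readers cannot
 * lose such a race: __rwsem_do_wake() raises sem->activity under
 * wait_lock before any of the woken readers actually run.
 */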

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity == 0) {
                /* got the lock: note there is no list_empty() check here,
                 * so like __down_write_nested() this may steal the lock
                 * ahead of queued waiters */
                sem->activity = -1;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}

/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
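
/*
 * Note: only the last reader out (the one that brings activity to zero)
 * does any waking, and the waiter at the head of the list must then be
 * a writer - a waiting reader would already have been granted the lock
 * alongside the readers that just released it.
 */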

/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        sem->activity = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        sem->activity = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
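
/*
 * Worked example: the caller holds the write lock, so sem->activity
 * goes from -1 straight to 1, counting the caller's new read hold;
 * __rwsem_do_wake(sem, 0) then adds one for each reader woken at the
 * front of the queue, while leaving any writer at the head asleep.
 */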