linux/lib/rwsem-spinlock.c
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001   David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/export.h>

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        unsigned int flags;
#define RWSEM_WAITING_FOR_READ  0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};

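/*
 * Lock state lives in sem->activity:
 *    0  - the semaphore is not held
 *   >0  - held by that many readers
 *   -1  - held by a single writer
 * Blocked tasks queue an rwsem_waiter (allocated on their own stack) on
 * sem->wait_list under sem->wait_lock.
 */
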
int rwsem_is_locked(struct rw_semaphore *sem)
{
        int ret = 1;
        unsigned long flags;

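        /*
         * If we cannot take wait_lock, someone else is currently working
         * on the semaphore, so conservatively report it as locked
         * (ret stays at 1).
         */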
        if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
                ret = (sem->activity != 0);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        }
        return ret;
}
EXPORT_SYMBOL(rwsem_is_locked);

/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->activity = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken waiter blocks are discarded from the list after their task pointer
 *   has been zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        int woken;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

        if (!wakewrite) {
                if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
                        goto out;
                goto dont_wake_writers;
        }

        /*
         * Since we support write lock stealing, we cannot set sem->activity
         * to -1 here to hand the lock over.  Instead, we wake the writer up
         * and let it go and take the lock again itself.
         */
        if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
                wake_up_process(waiter->task);
                goto out;
        }

        /* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
        woken = 0;
        while (waiter->flags & RWSEM_WAITING_FOR_READ) {
                struct list_head *next = waiter->list.next;

                list_del(&waiter->list);
                tsk = waiter->task;
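                /*
                 * The waiter struct lives on the sleeping reader's stack;
                 * once ->task is cleared below, that reader may return from
                 * __down_read() and the struct may vanish, so it must be
                 * off the list (and fully read) before ->task is cleared.
                 */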
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
                woken++;
                if (list_empty(&sem->wait_list))
                        break;
                waiter = list_entry(next, struct rwsem_waiter, list);
        }

        sem->activity += woken;

 out:
        return sem;
}

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        wake_up_process(waiter->task);

        return sem;
}

/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                goto out;
        }

        tsk = current;
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.flags = RWSEM_WAITING_FOR_READ;
        get_task_struct(tsk);

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we don't need to touch the semaphore struct anymore */
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        /* wait to be given the lock */
        for (;;) {
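                /*
                 * __rwsem_do_wake() clears waiter.task once the read lock
                 * has been granted to us
                 */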
                if (!waiter.task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;
 out:
        ;
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}

/*
 * get a write lock on the semaphore
 */
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* set up my own style of waitqueue */
        tsk = current;
        waiter.task = tsk;
        waiter.flags = RWSEM_WAITING_FOR_WRITE;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* wait for someone to release the lock */
        for (;;) {
                /*
                 * This is the key to write lock stealing: it lets the task
                 * already on the CPU take the lock as soon as it becomes
                 * free, rather than putting itself to sleep and waiting for
                 * the system to wake it (or whoever is at the head of the
                 * wait list) up.
                 */
                if (sem->activity == 0)
                        break;
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }
        /* got the lock */
        sem->activity = -1;
        list_del(&waiter.list);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

void __sched __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity == 0) {
                /* got the lock */
                sem->activity = -1;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}

/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        sem->activity = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

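        /* the downgrading task now counts as the single active reader */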
        sem->activity = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

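For context, here is a minimal caller-side sketch (hypothetical names, not part
of this file) of how the public rwsem API declared in <linux/rwsem.h> -- which
wraps the __down_ and __up_ primitives above on spinlock-based configurations --
is typically used. Since these are sleeping locks, none of the calls below may
be made from interrupt context.

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);      /* hypothetical semaphore */
static int example_value;               /* hypothetical shared data */

int example_read(void)
{
        int v;

        down_read(&example_sem);        /* shared: many readers may hold it */
        v = example_value;
        up_read(&example_sem);
        return v;
}

void example_update(int v)
{
        down_write(&example_sem);       /* exclusive: sleeps until sole owner */
        example_value = v;
        downgrade_write(&example_sem);  /* keep a read lock; readers may now enter */
        /* ... continue reading example_value under the read lock ... */
        up_read(&example_sem);
}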