linux/kernel/spinlock.c
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP non-debug builds inline them.)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc(). If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/module.h>

int __lockfunc _spin_trylock(spinlock_t *lock)
{
        preempt_disable();
        if (_raw_spin_trylock(lock)) {
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }

        preempt_enable();
        return 0;
}
EXPORT_SYMBOL(_spin_trylock);
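
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file; the example_* names are hypothetical). A trylock either takes
 * the lock and returns 1, or returns 0 immediately without spinning --
 * so only the success path may unlock:
 */
#if 0
static int example_try_bump(spinlock_t *lock, int *counter)
{
        if (!spin_trylock(lock))        /* maps to _spin_trylock() on SMP */
                return 0;               /* contended: caller may retry */
        (*counter)++;                   /* critical section */
        spin_unlock(lock);
        return 1;
}
#endif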

int __lockfunc _read_trylock(rwlock_t *lock)
{
        preempt_disable();
        if (_raw_read_trylock(lock)) {
                rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }

        preempt_enable();
        return 0;
}
EXPORT_SYMBOL(_read_trylock);

int __lockfunc _write_trylock(rwlock_t *lock)
{
        preempt_disable();
        if (_raw_write_trylock(lock)) {
                rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }

        preempt_enable();
        return 0;
}
EXPORT_SYMBOL(_write_trylock);
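
/*
 * Illustrative usage sketch (editor's addition; the example_* names are
 * hypothetical). Several readers may hold the lock at once, a writer
 * needs it exclusively, and each trylock is unwound only on success:
 */
#if 0
static int example_read_stat(rwlock_t *lock, const int *stat, int *out)
{
        if (!read_trylock(lock))
                return 0;
        *out = *stat;                   /* shared (reader-side) section */
        read_unlock(lock);
        return 1;
}
#endif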

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

void __lockfunc _read_lock(rwlock_t *lock)
{
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        /*
         * On lockdep we don't want the hand-coded irq-enable of
         * _raw_spin_lock_flags() code, because lockdep assumes
         * that interrupts are not re-enabled during lock-acquire:
         */
#ifdef CONFIG_LOCKDEP
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
        _raw_spin_lock_flags(lock, &flags);
#endif
        return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
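
/*
 * Illustrative usage sketch (editor's addition; the example_* names are
 * hypothetical). The irqsave variant protects data that an interrupt
 * handler also touches: flags preserves the caller's IRQ state, so the
 * spin_lock_irqsave()/spin_unlock_irqrestore() pairing nests safely:
 */
#if 0
static void example_push(spinlock_t *lock, struct list_head *q,
                         struct list_head *item)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags); /* IRQs off, prior state saved */
        list_add_tail(item, q);
        spin_unlock_irqrestore(lock, flags);
}
#endif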

void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
        local_irq_disable();
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_irq);

void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
        local_bh_disable();
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
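
/*
 * Illustrative usage sketch (editor's addition; the example_* names are
 * hypothetical). The _bh variant suffices when the lock is shared only
 * with softirq (bottom-half) context, e.g. a timer or tasklet:
 */
#if 0
static void example_account(spinlock_t *lock, u64 *bytes, u64 n)
{
        spin_lock_bh(lock);     /* blocks local softirqs, not hard IRQs */
        *bytes += n;
        spin_unlock_bh(lock);
}
#endif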

unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
        return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);

void __lockfunc _read_lock_irq(rwlock_t *lock)
{
        local_irq_disable();
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock_irq);

void __lockfunc _read_lock_bh(rwlock_t *lock)
{
        local_bh_disable();
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock_bh);

unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
        return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);

void __lockfunc _write_lock_irq(rwlock_t *lock)
{
        local_irq_disable();
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock_irq);

void __lockfunc _write_lock_bh(rwlock_t *lock)
{
        local_bh_disable();
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock_bh);

void __lockfunc _spin_lock(spinlock_t *lock)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock);

void __lockfunc _write_lock(rwlock_t *lock)
{
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock);

#else /* CONFIG_GENERIC_LOCKBREAK && !CONFIG_DEBUG_LOCK_ALLOC: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptible if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */

#define BUILD_LOCK_OPS(op, locktype)                                    \
void __lockfunc _##op##_lock(locktype##_t *lock)                        \
{                                                                       \
        for (;;) {                                                      \
                preempt_disable();                                      \
                if (likely(_raw_##op##_trylock(lock)))                  \
                        break;                                          \
                preempt_enable();                                       \
                                                                        \
                if (!(lock)->break_lock)                                \
                        (lock)->break_lock = 1;                         \
                while (!op##_can_lock(lock) && (lock)->break_lock)      \
                        _raw_##op##_relax(&lock->raw_lock);             \
        }                                                               \
        (lock)->break_lock = 0;                                         \
}                                                                       \
                                                                        \
EXPORT_SYMBOL(_##op##_lock);                                            \
                                                                        \
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)       \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        for (;;) {                                                      \
                preempt_disable();                                      \
                local_irq_save(flags);                                  \
                if (likely(_raw_##op##_trylock(lock)))                  \
                        break;                                          \
                local_irq_restore(flags);                               \
                preempt_enable();                                       \
                                                                        \
                if (!(lock)->break_lock)                                \
                        (lock)->break_lock = 1;                         \
                while (!op##_can_lock(lock) && (lock)->break_lock)      \
                        _raw_##op##_relax(&lock->raw_lock);             \
        }                                                               \
        (lock)->break_lock = 0;                                         \
        return flags;                                                   \
}                                                                       \
                                                                        \
EXPORT_SYMBOL(_##op##_lock_irqsave);                                    \
                                                                        \
void __lockfunc _##op##_lock_irq(locktype##_t *lock)                    \
{                                                                       \
        _##op##_lock_irqsave(lock);                                     \
}                                                                       \
                                                                        \
EXPORT_SYMBOL(_##op##_lock_irq);                                        \
                                                                        \
void __lockfunc _##op##_lock_bh(locktype##_t *lock)                     \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        /*                                                      */      \
        /* Careful: we must exclude softirqs too, hence the     */      \
        /* irq-disabling. We use the generic preemption-aware   */      \
        /* function:                                            */      \
        /**/                                                            \
        flags = _##op##_lock_irqsave(lock);                             \
        local_bh_disable();                                             \
        local_irq_restore(flags);                                       \
}                                                                       \
                                                                        \
EXPORT_SYMBOL(_##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
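
/*
 * Illustrative expansion sketch (editor's addition). For
 * BUILD_LOCK_OPS(spin, spinlock), the first generated function is
 * roughly the following: spin briefly with preemption disabled, and on
 * contention re-enable preemption, ask the current holder to break the
 * lock, and wait politely before retrying:
 */
#if 0
void __lockfunc _spin_lock(spinlock_t *lock)
{
        for (;;) {
                preempt_disable();
                if (likely(_raw_spin_trylock(lock)))
                        break;
                preempt_enable();       /* stay preemptible while waiting */

                if (!lock->break_lock)
                        lock->break_lock = 1;
                while (!spin_can_lock(lock) && lock->break_lock)
                        _raw_spin_relax(&lock->raw_lock);
        }
        lock->break_lock = 0;
}
#endif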

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nested);
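
/*
 * Illustrative usage sketch (editor's addition; the example_* names are
 * hypothetical). Taking two locks of the same lockdep class would
 * normally trigger a deadlock warning; a subclass tells lockdep the
 * nesting is intentional, given a stable ordering (here: by address):
 */
#if 0
static void example_double_lock(spinlock_t *a, spinlock_t *b)
{
        if (a > b)
                swap(a, b);             /* enforce a consistent order */
        spin_lock(a);
        spin_lock_nested(b, SINGLE_DEPTH_NESTING);
        /* ... both objects locked ... */
        spin_unlock(b);
        spin_unlock(a);
}
#endif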

unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        /*
         * On lockdep we don't want the hand-coded irq-enable of
         * _raw_spin_lock_flags() code, because lockdep assumes
         * that interrupts are not re-enabled during lock-acquire:
         */
#ifdef CONFIG_LOCKDEP
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
        _raw_spin_lock_flags(lock, &flags);
#endif
        return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave_nested);

void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
                                     struct lockdep_map *nest_lock)
{
        preempt_disable();
        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nest_lock);

#endif /* CONFIG_DEBUG_LOCK_ALLOC */

void __lockfunc _spin_unlock(spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        _raw_spin_unlock(lock);
        preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);

void __lockfunc _write_unlock(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_write_unlock(lock);
        preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);

void __lockfunc _read_unlock(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_read_unlock(lock);
        preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        _raw_spin_unlock(lock);
        local_irq_restore(flags);
        preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);

void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        _raw_spin_unlock(lock);
        local_irq_enable();
        preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);

void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        _raw_spin_unlock(lock);
        preempt_enable_no_resched();
        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_spin_unlock_bh);

void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_read_unlock(lock);
        local_irq_restore(flags);
        preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);

void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_read_unlock(lock);
        local_irq_enable();
        preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);

void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_read_unlock(lock);
        preempt_enable_no_resched();
        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_read_unlock_bh);

void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_write_unlock(lock);
        local_irq_restore(flags);
        preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);

void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_write_unlock(lock);
        local_irq_enable();
        preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);

void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_write_unlock(lock);
        preempt_enable_no_resched();
        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_write_unlock_bh);

int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
        local_bh_disable();
        preempt_disable();
        if (_raw_spin_trylock(lock)) {
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }

        preempt_enable_no_resched();
        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
        return 0;
}
EXPORT_SYMBOL(_spin_trylock_bh);
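
/*
 * Illustrative usage sketch (editor's addition; the example_* names are
 * hypothetical). _spin_trylock_bh() disables softirqs up front and, as
 * above, re-enables them itself on failure, so the caller unwinds only
 * on success:
 */
#if 0
static int example_try_drain(spinlock_t *lock, int *pending)
{
        if (!spin_trylock_bh(lock))
                return 0;               /* softirqs already re-enabled */
        *pending = 0;
        spin_unlock_bh(lock);
        return 1;
}
#endif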

notrace int in_lock_functions(unsigned long addr)
{
        /* Linker adds these: start and end of __lockfunc functions */
        extern char __lock_text_start[], __lock_text_end[];

        return addr >= (unsigned long)__lock_text_start &&
               addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
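
/*
 * Illustrative usage sketch (editor's addition; the example_* names are
 * hypothetical). This predicate lets profilers skip lock-acquisition
 * frames when attributing a sample, in the spirit of profile_pc():
 */
#if 0
static unsigned long example_attribute(unsigned long pc,
                                       unsigned long caller)
{
        /* charge the caller, not the spinlock implementation itself */
        return in_lock_functions(pc) ? caller : pc;
}
#endif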