linux/kernel/spinlock.c
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/module.h>

int __lockfunc _spin_trylock(spinlock_t *lock)
{
        preempt_disable();
        if (_raw_spin_trylock(lock)) {
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }

        preempt_enable();
        return 0;
}
EXPORT_SYMBOL(_spin_trylock);
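
/*
 * Usage sketch (illustrative only, not part of this file): callers
 * normally reach the wrapper above through the spin_trylock() macro
 * and take a fallback path when the lock is contended. demo_lock is
 * a hypothetical example lock:
 *
 *      static DEFINE_SPINLOCK(demo_lock);
 *
 *      if (spin_trylock(&demo_lock)) {
 *              ... critical section ...
 *              spin_unlock(&demo_lock);
 *      } else {
 *              ... lock was busy: retry later or take a slow path ...
 *      }
 */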

int __lockfunc _read_trylock(rwlock_t *lock)
{
        preempt_disable();
        if (_raw_read_trylock(lock)) {
                rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }

        preempt_enable();
        return 0;
}
EXPORT_SYMBOL(_read_trylock);

int __lockfunc _write_trylock(rwlock_t *lock)
{
        preempt_disable();
        if (_raw_write_trylock(lock)) {
                rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }

        preempt_enable();
        return 0;
}
EXPORT_SYMBOL(_write_trylock);
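
/*
 * Usage sketch (illustrative): read_trylock()/write_trylock() follow
 * the same pattern as spin_trylock() above. A read attempt fails only
 * while a writer holds the lock; a write attempt fails while any
 * reader or writer does. demo_rwlock is hypothetical:
 *
 *      static DEFINE_RWLOCK(demo_rwlock);
 *
 *      if (read_trylock(&demo_rwlock)) {
 *              ... read-side critical section ...
 *              read_unlock(&demo_rwlock);
 *      }
 */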

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

void __lockfunc _read_lock(rwlock_t *lock)
{
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        /*
         * On lockdep we don't want the hand-coded irq-enable of
         * _raw_spin_lock_flags() code, because lockdep assumes
         * that interrupts are not re-enabled during lock-acquire:
         */
#ifdef CONFIG_LOCKDEP
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
        _raw_spin_lock_flags(lock, &flags);
#endif
        return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
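
/*
 * Usage sketch (illustrative): the flags value returned above must be
 * handed back to spin_unlock_irqrestore(), so the caller's previous
 * interrupt state is restored rather than interrupts being enabled
 * unconditionally. Note that spin_lock_irqsave() is a macro taking
 * flags by name, which is why this function returns the flags instead
 * of taking a pointer:
 *
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&demo_lock, flags);
 *      ... critical section, hardirqs off on this CPU ...
 *      spin_unlock_irqrestore(&demo_lock, flags);
 */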

void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
        local_irq_disable();
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_irq);

void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
        local_bh_disable();
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
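
/*
 * Usage sketch (illustrative): the _bh variants are for data shared
 * with softirq (bottom-half) context, e.g. timer or network receive
 * handlers; they disable softirqs locally rather than hard interrupts:
 *
 *      spin_lock_bh(&demo_lock);
 *      ... critical section, softirqs off on this CPU ...
 *      spin_unlock_bh(&demo_lock);
 */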

unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
                             _raw_read_lock_flags, &flags);
        return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);

void __lockfunc _read_lock_irq(rwlock_t *lock)
{
        local_irq_disable();
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock_irq);

void __lockfunc _read_lock_bh(rwlock_t *lock)
{
        local_bh_disable();
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(_read_lock_bh);

unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
                             _raw_write_lock_flags, &flags);
        return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);

void __lockfunc _write_lock_irq(rwlock_t *lock)
{
        local_irq_disable();
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock_irq);

void __lockfunc _write_lock_bh(rwlock_t *lock)
{
        local_bh_disable();
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(_write_lock_bh);

void __lockfunc _spin_lock(spinlock_t *lock)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

EXPORT_SYMBOL(_spin_lock);

void __lockfunc _write_lock(rwlock_t *lock)
{
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

EXPORT_SYMBOL(_write_lock);
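
/*
 * Usage sketch (illustrative): rwlocks admit any number of concurrent
 * readers, while a writer excludes readers and other writers alike:
 *
 *      read_lock(&demo_rwlock);
 *      ... other readers may run concurrently here ...
 *      read_unlock(&demo_rwlock);
 *
 *      write_lock(&demo_rwlock);
 *      ... exclusive access here ...
 *      write_unlock(&demo_rwlock);
 */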

#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptible if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */

#define BUILD_LOCK_OPS(op, locktype)                                    \
void __lockfunc _##op##_lock(locktype##_t *lock)                        \
{                                                                       \
        for (;;) {                                                      \
                preempt_disable();                                      \
                if (likely(_raw_##op##_trylock(lock)))                  \
                        break;                                          \
                preempt_enable();                                       \
                                                                        \
                if (!(lock)->break_lock)                                \
                        (lock)->break_lock = 1;                         \
                while (!op##_can_lock(lock) && (lock)->break_lock)      \
                        _raw_##op##_relax(&lock->raw_lock);             \
        }                                                               \
        (lock)->break_lock = 0;                                         \
}                                                                       \
                                                                        \
EXPORT_SYMBOL(_##op##_lock);                                            \
                                                                        \
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)       \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        for (;;) {                                                      \
                preempt_disable();                                      \
                local_irq_save(flags);                                  \
                if (likely(_raw_##op##_trylock(lock)))                  \
                        break;                                          \
                local_irq_restore(flags);                               \
                preempt_enable();                                       \
                                                                        \
                if (!(lock)->break_lock)                                \
                        (lock)->break_lock = 1;                         \
                while (!op##_can_lock(lock) && (lock)->break_lock)      \
                        _raw_##op##_relax(&lock->raw_lock);             \
        }                                                               \
        (lock)->break_lock = 0;                                         \
        return flags;                                                   \
}                                                                       \
                                                                        \
EXPORT_SYMBOL(_##op##_lock_irqsave);                                    \
                                                                        \
void __lockfunc _##op##_lock_irq(locktype##_t *lock)                    \
{                                                                       \
        _##op##_lock_irqsave(lock);                                     \
}                                                                       \
                                                                        \
EXPORT_SYMBOL(_##op##_lock_irq);                                        \
                                                                        \
void __lockfunc _##op##_lock_bh(locktype##_t *lock)                     \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        /* Careful: we must exclude softirqs too, hence the     */      \
        /* irq-disabling. We use the generic preemption-aware   */      \
        /* function:                                            */      \
        flags = _##op##_lock_irqsave(lock);                             \
        local_bh_disable();                                             \
        local_irq_restore(flags);                                       \
}                                                                       \
                                                                        \
EXPORT_SYMBOL(_##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
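
/*
 * For reference, a rough sketch of what BUILD_LOCK_OPS(spin, spinlock)
 * expands to for the plain lock operation, with the token-pasting
 * filled in (the _irqsave/_irq/_bh variants follow the same shape):
 *
 *      void __lockfunc _spin_lock(spinlock_t *lock)
 *      {
 *              for (;;) {
 *                      preempt_disable();
 *                      if (likely(_raw_spin_trylock(lock)))
 *                              break;
 *                      preempt_enable();
 *
 *                      if (!(lock)->break_lock)
 *                              (lock)->break_lock = 1;
 *                      while (!spin_can_lock(lock) && (lock)->break_lock)
 *                              _raw_spin_relax(&lock->raw_lock);
 *              }
 *              (lock)->break_lock = 0;
 *      }
 */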

#endif /* CONFIG_PREEMPT */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nested);
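
/*
 * Usage sketch (illustrative): when two locks of the same lock class
 * must be held at once, e.g. on two instances of the same structure,
 * the inner acquisition needs a subclass annotation so lockdep does
 * not report a false recursive-locking deadlock. parent/child are
 * hypothetical objects whose locks share one class:
 *
 *      spin_lock(&parent->lock);
 *      spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *      ...
 *      spin_unlock(&child->lock);
 *      spin_unlock(&parent->lock);
 */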

unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
                                _raw_spin_lock_flags, &flags);
        return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave_nested);

void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
                                     struct lockdep_map *nest_lock)
{
        preempt_disable();
        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nest_lock);
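
/*
 * Usage sketch (illustrative): spin_lock_nest_lock() tells lockdep
 * that any number of locks of one class may be acquired, because an
 * outer lock serializing all of them is already held. group/obj and
 * the outer mutex are hypothetical:
 *
 *      mutex_lock(&group->outer);
 *      list_for_each_entry(obj, &group->objects, node)
 *              spin_lock_nest_lock(&obj->lock, &group->outer);
 */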

#endif

void __lockfunc _spin_unlock(spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        _raw_spin_unlock(lock);
        preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);

void __lockfunc _write_unlock(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_write_unlock(lock);
        preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);

void __lockfunc _read_unlock(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_read_unlock(lock);
        preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        _raw_spin_unlock(lock);
        local_irq_restore(flags);
        preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);

void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        _raw_spin_unlock(lock);
        local_irq_enable();
        preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);

void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        _raw_spin_unlock(lock);
        preempt_enable_no_resched();
        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_spin_unlock_bh);

void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_read_unlock(lock);
        local_irq_restore(flags);
        preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);

void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_read_unlock(lock);
        local_irq_enable();
        preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);

void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_read_unlock(lock);
        preempt_enable_no_resched();
        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_read_unlock_bh);

void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_write_unlock(lock);
        local_irq_restore(flags);
        preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);

void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_write_unlock(lock);
        local_irq_enable();
        preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);

void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_write_unlock(lock);
        preempt_enable_no_resched();
        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_write_unlock_bh);

int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
        local_bh_disable();
        preempt_disable();
        if (_raw_spin_trylock(lock)) {
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }

        preempt_enable_no_resched();
        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
        return 0;
}
EXPORT_SYMBOL(_spin_trylock_bh);
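
/*
 * Usage sketch (illustrative): like spin_trylock(), but on success the
 * caller also holds softirqs off, so the matching unlock is
 * spin_unlock_bh():
 *
 *      if (spin_trylock_bh(&demo_lock)) {
 *              ... critical section, softirqs off ...
 *              spin_unlock_bh(&demo_lock);
 *      }
 */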

notrace int in_lock_functions(unsigned long addr)
{
        /* Linker adds these: start and end of __lockfunc functions */
        extern char __lock_text_start[], __lock_text_end[];

        return addr >= (unsigned long)__lock_text_start
        && addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
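
/*
 * Usage sketch (illustrative): an architecture's profile_pc() can use
 * this to avoid attributing profiler samples to the lock wrappers
 * themselves and instead walk back to the real caller:
 *
 *      if (in_lock_functions(pc))
 *              pc = ... caller's return address, dug out of the stack ...;
 */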