/* linux/kernel/spinlock.c */
   1/*
   2 * Copyright (2004) Linus Torvalds
   3 *
   4 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
   5 *
   6 * Copyright (2004, 2005) Ingo Molnar
   7 *
   8 * This file contains the spinlock/rwlock implementations for the
   9 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
  10 *
  11 * Note that some architectures have special knowledge about the
  12 * stack frames of these functions in their profile_pc. If you
  13 * change anything significant here that could change the stack
  14 * frame contact the architecture maintainers.
  15 */
  16
  17#include <linux/linkage.h>
  18#include <linux/preempt.h>
  19#include <linux/spinlock.h>
  20#include <linux/interrupt.h>
  21#include <linux/debug_locks.h>
  22#include <linux/module.h>
  23
  24int __lockfunc _spin_trylock(spinlock_t *lock)
  25{
  26        preempt_disable();
  27        if (_raw_spin_trylock(lock)) {
  28                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
  29                return 1;
  30        }
  31        
  32        preempt_enable();
  33        return 0;
  34}
  35EXPORT_SYMBOL(_spin_trylock);
  36
/*
 * Try to take @lock for reading without spinning.
 * Returns 1 on success (with preemption left disabled), 0 on failure.
 */
int __lockfunc _read_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock)) {
		/* lockdep: record the read acquisition (trylock argument == 1) */
		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_read_trylock);
  49
/*
 * Try to take @lock for writing without spinning.
 * Returns 1 on success (with preemption left disabled), 0 on failure.
 */
int __lockfunc _write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock)) {
		/* lockdep: record the write acquisition (trylock argument == 1) */
		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_write_trylock);
  62
  63/*
  64 * If lockdep is enabled then we use the non-preemption spin-ops
  65 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  66 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  67 */
  68#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
  69        defined(CONFIG_DEBUG_LOCK_ALLOC)
  70
/*
 * Acquire @lock for reading.  Preemption is disabled for the whole
 * hold time; the lockdep annotation precedes the actual raw acquire.
 */
void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);
  78
/*
 * Acquire @lock with hardirqs disabled on this CPU.  Returns the
 * previous irq flags, to be passed to _spin_unlock_irqrestore().
 */
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_PROVE_LOCKING
	_raw_spin_lock(lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
  99
/*
 * Acquire @lock with hardirqs disabled.  Unlike the irqsave variant,
 * the caller promises irqs were enabled; pair with _spin_unlock_irq().
 */
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
 108
/*
 * Acquire @lock with softirq processing disabled on this CPU.
 * Pair with _spin_unlock_bh().
 */
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
 117
/*
 * Acquire @lock for reading with hardirqs disabled; returns the
 * previous irq flags for _read_unlock_irqrestore().
 */
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_read_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);
 129
/*
 * Acquire @lock for reading with hardirqs disabled; caller promises
 * irqs were enabled.  Pair with _read_unlock_irq().
 */
void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_irq);
 138
/*
 * Acquire @lock for reading with softirqs disabled on this CPU.
 * Pair with _read_unlock_bh().
 */
void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_bh);
 147
/*
 * Acquire @lock for writing with hardirqs disabled; returns the
 * previous irq flags for _write_unlock_irqrestore().
 */
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_write_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);
 159
/*
 * Acquire @lock for writing with hardirqs disabled; caller promises
 * irqs were enabled.  Pair with _write_unlock_irq().
 */
void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_irq);
 168
/*
 * Acquire @lock for writing with softirqs disabled on this CPU.
 * Pair with _write_unlock_bh().
 */
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_bh);
 177
/*
 * Acquire @lock.  Preemption is disabled for the whole hold time;
 * the lockdep annotation precedes the actual raw acquire.
 */
void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_spin_lock(lock);
}

EXPORT_SYMBOL(_spin_lock);
 186
/*
 * Acquire @lock for writing.  Preemption is disabled for the whole
 * hold time; the lockdep annotation precedes the actual raw acquire.
 */
void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_write_lock(lock);
}

EXPORT_SYMBOL(_write_lock);
 195
 196#else /* CONFIG_PREEMPT: */
 197
 198/*
 199 * This could be a long-held lock. We both prepare to spin for a long
 200 * time (making _this_ CPU preemptable if possible), and we also signal
 201 * towards that other CPU that it should break the lock ASAP.
 202 *
 203 * (We do this in a function because inlining it would be excessive.)
 204 */
 205
/*
 * BUILD_LOCK_OPS - generate the preemption-friendly lock-spinning ops
 * (_op_lock, _op_lock_irqsave, _op_lock_irq, _op_lock_bh) for one lock
 * type.  Rather than spinning with preemption off, each acquire loops:
 * trylock with preemption disabled, and on failure re-enable preemption
 * (and irqs) and spin on break_lock so this CPU stays preemptable while
 * it waits and the holder is asked to drop the lock ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc _##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		preempt_enable();					\
									\
		/* signal the holder to release ASAP */			\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		/* spin politely until the lock looks takable */	\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			_raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(_##op##_lock);						\
									\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		/* failed: re-enable irqs/preemption while waiting */	\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			_raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irqsave);					\
									\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
{									\
	_##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irq);					\
									\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/*							*/	\
	/* Careful: we must exclude softirqs too, hence the	*/	\
	/* irq-disabling. We use the generic preemption-aware	*/	\
	/* function:						*/	\
	/**/								\
	flags = _##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_bh)
 270
/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
 283
 284#endif /* CONFIG_PREEMPT */
 285
 286#ifdef CONFIG_DEBUG_LOCK_ALLOC
 287
/*
 * Acquire @lock, annotating it for lockdep as @subclass so that
 * nesting two locks of the same class does not trigger a false
 * deadlock report.
 */
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	_raw_spin_lock(lock);
}

EXPORT_SYMBOL(_spin_lock_nested);
 296unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
 297{
 298        unsigned long flags;
 299
 300        local_irq_save(flags);
 301        preempt_disable();
 302        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
 303        /*
 304         * On lockdep we dont want the hand-coded irq-enable of
 305         * _raw_spin_lock_flags() code, because lockdep assumes
 306         * that interrupts are not re-enabled during lock-acquire:
 307         */
 308#ifdef CONFIG_PROVE_SPIN_LOCKING
 309        _raw_spin_lock(lock);
 310#else
 311        _raw_spin_lock_flags(lock, &flags);
 312#endif
 313        return flags;
 314}
 315
 316EXPORT_SYMBOL(_spin_lock_irqsave_nested);
 317
 318#endif
 319
/*
 * Release @lock and re-enable preemption.  The lockdep release
 * annotation happens before the actual raw unlock.
 */
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);
 327
/*
 * Release a write-held @lock and re-enable preemption.
 */
void __lockfunc _write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);
 335
/*
 * Release a read-held @lock and re-enable preemption.
 */
void __lockfunc _read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);
 343
/*
 * Release @lock, restore the irq flags saved by _spin_lock_irqsave(),
 * then re-enable preemption.
 */
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);
 352
/*
 * Release @lock, unconditionally re-enable hardirqs, then preemption.
 * Pairs with _spin_lock_irq().
 */
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);
 361
/*
 * Release @lock and re-enable softirqs.  Preemption is re-enabled
 * without an immediate resched check; the caller's return address is
 * passed so the bh-enable is attributed to the caller.
 */
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_spin_unlock_bh);
 370
/*
 * Release a read-held @lock, restore the saved irq flags, then
 * re-enable preemption.  Pairs with _read_lock_irqsave().
 */
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);
 379
/*
 * Release a read-held @lock, unconditionally re-enable hardirqs, then
 * preemption.  Pairs with _read_lock_irq().
 */
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);
 388
/*
 * Release a read-held @lock and re-enable softirqs.  Same
 * no-resched/bh-enable sequence as _spin_unlock_bh().
 */
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_read_unlock_bh);
 397
/*
 * Release a write-held @lock, restore the saved irq flags, then
 * re-enable preemption.  Pairs with _write_lock_irqsave().
 */
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);
 406
/*
 * Release a write-held @lock, unconditionally re-enable hardirqs, then
 * preemption.  Pairs with _write_lock_irq().
 */
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);
 415
/*
 * Release a write-held @lock and re-enable softirqs.  Same
 * no-resched/bh-enable sequence as _spin_unlock_bh().
 */
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_write_unlock_bh);
 424
/*
 * Try to take @lock with softirqs disabled.  On success returns 1 with
 * bhs and preemption left disabled; on failure both are restored and
 * 0 is returned.
 */
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		/* lockdep: record the acquisition (trylock argument == 1) */
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
EXPORT_SYMBOL(_spin_trylock_bh);
 439
 440int in_lock_functions(unsigned long addr)
 441{
 442        /* Linker adds these: start and end of __lockfunc functions */
 443        extern char __lock_text_start[], __lock_text_end[];
 444
 445        return addr >= (unsigned long)__lock_text_start
 446        && addr < (unsigned long)__lock_text_end;
 447}
 448EXPORT_SYMBOL(in_lock_functions);
 449