linux/arch/ia64/include/asm/bitops.h
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
 * O(1) scheduler patch
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) which use these operations
 * to operate on hw-defined data-structures, so we can't easily change
 * these operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = 1 << (nr & 31);
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old | bit;
        } while (cmpxchg_acq(m, old, new) != old);
}
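
/*
 * Example (illustrative sketch, not part of this header's API): a driver
 * might publish a "transmit pending" flag to another CPU with set_bit().
 * The bitmap and bit names below are hypothetical.
 *
 *      static unsigned long my_flags[1];       // hypothetical bitmap
 *      #define MY_TX_PENDING 0                 // hypothetical bit number
 *
 *      set_bit(MY_TX_PENDING, my_flags);       // atomic read-modify-write
 *
 * Internally this retries the cmpxchg_acq() loop above until no other
 * CPU has modified the containing 32-bit word between the load and the
 * compare-and-exchange.
 */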

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it is called concurrently on the same region of memory, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
        *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

/*
 * clear_bit() has "acquire" semantics.
 */
#define smp_mb__before_clear_bit()      smp_mb()
#define smp_mb__after_clear_bit()       do { /* skip */; } while (0)

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
        __u32 mask, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        mask = ~(1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old & mask;
        } while (cmpxchg_acq(m, old, new) != old);
}
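
/*
 * Example (illustrative sketch): releasing a bit used as a lock with
 * clear_bit() requires the explicit barrier, because clear_bit() itself
 * only has acquire semantics.  MY_LOCK_BIT and my_flags are hypothetical.
 *
 *      smp_mb__before_clear_bit();     // order prior stores before release
 *      clear_bit(MY_LOCK_BIT, my_flags);
 *
 * Prefer clear_bit_unlock() below, which folds the release ordering into
 * the operation itself.
 */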

/**
 * clear_bit_unlock - Clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and may not be reordered.  It does
 * contain a memory barrier suitable for unlock type operations.
 */
static __inline__ void
clear_bit_unlock (int nr, volatile void *addr)
{
        __u32 mask, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        mask = ~(1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old & mask;
        } while (cmpxchg_rel(m, old, new) != old);
}

/**
 * __clear_bit_unlock - Non-atomically clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * As in clear_bit_unlock(), the implementation uses a single store with
 * release semantics.  See also __raw_spin_unlock().
 */
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
{
        __u32 * const m = (__u32 *) addr + (nr >> 5);
        __u32 const new = *m & ~(1 << (nr & 31));

        ia64_st4_rel_nta(m, new);
}

/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it is called concurrently on the same region of memory, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
        *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = (1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old ^ bit;
        } while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it is called concurrently on the same region of memory, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
        *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = 1 << (nr & 31);
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old | bit;
        } while (cmpxchg_acq(m, old, new) != old);
        return (old & bit) != 0;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit() on ia64, since
 * test_and_set_bit() already provides the acquire semantics a lock
 * acquisition needs.
 */
#define test_and_set_bit_lock test_and_set_bit

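/*
 * Example (illustrative sketch): a minimal bit-based lock built from the
 * primitives above.  MY_LOCK_BIT and my_flags are hypothetical names.
 *
 *      while (test_and_set_bit_lock(MY_LOCK_BIT, my_flags))
 *              cpu_relax();            // spin until the bit was clear
 *      ... critical section ...
 *      clear_bit_unlock(MY_LOCK_BIT, my_flags);
 *
 * The acquire semantics of test_and_set_bit_lock() keep the critical
 * section from floating above the lock; the release semantics of
 * clear_bit_unlock() keep it from floating below the unlock.
 */
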
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
        __u32 *p = (__u32 *) addr + (nr >> 5);
        __u32 m = 1 << (nr & 31);
        int oldbitset = (*p & m) != 0;

        *p |= m;
        return oldbitset;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
        __u32 mask, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        mask = ~(1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old & mask;
        } while (cmpxchg_acq(m, old, new) != old);
        return (old & ~mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void * addr)
{
        __u32 *p = (__u32 *) addr + (nr >> 5);
        __u32 m = 1 << (nr & 31);
        /* note: returns the raw mask value, i.e. non-zero rather than 1,
           when the bit was set */
        int oldbitset = *p & m;

        *p &= ~m;
        return oldbitset;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = (1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old ^ bit;
        } while (cmpxchg_acq(m, old, new) != old);
        return (old & bit) != 0;
}

/**
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
        __u32 old, bit = (1 << (nr & 31));
        __u32 *m = (__u32 *) addr + (nr >> 5);

        old = *m;
        *m = old ^ bit;
        return (old & bit) != 0;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int
test_bit (int nr, const volatile void *addr)
{
        return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}
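
/*
 * Example (illustrative sketch): polling a completion flag set by another
 * CPU.  MY_DONE_BIT and my_flags are hypothetical names.
 *
 *      while (!test_bit(MY_DONE_BIT, my_flags))
 *              cpu_relax();
 *
 * test_bit() is a plain load; it provides no ordering guarantees, so any
 * data published before the flag was set must be fenced separately.
 */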

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
        unsigned long result;

        /* x & (~x - 1) isolates the trailing ones of x; counting them
           yields the index of the lowest zero bit. */
        result = ia64_popcnt(x & (~x - 1));
        return result;
}
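
/*
 * Worked example: for x = 0xb (binary ...1011), ~x - 1 ends in ...0011,
 * so x & (~x - 1) = 0x3 and ia64_popcnt(0x3) = 2 -- bit 2 is indeed the
 * lowest zero bit of x.
 */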

/**
 * __ffs - find first bit in word.
 * @x: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
        unsigned long result;

        /* (x - 1) & ~x isolates the trailing zeros of x; counting them
           yields the index of the lowest set bit. */
        result = ia64_popcnt((x-1) & ~x);
        return result;
}
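
/*
 * Worked example: for x = 0x18 (binary 11000), (x - 1) & ~x = 0x7 and
 * ia64_popcnt(0x7) = 3, so __ffs(0x18) = 3 -- the index of the lowest
 * set bit.
 */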

#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set.  Undefined
 * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
        long double d = x;
        long exp;

        exp = ia64_getf_exp(d);
        return exp - 0xffff;
}
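
/*
 * How this works: converting x to long double normalizes it to
 * 1.xxx * 2^e, and the ia64 register-format exponent field read by
 * getf.exp holds e biased by 0xffff.  Worked example: ia64_fls(9)
 * converts 9 to 1.001b * 2^3, getf.exp reads the biased exponent
 * 0x10002, and 0x10002 - 0xffff = 3.
 */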

/*
 * Find the last (most significant) bit set.  Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int
fls (int t)
{
        unsigned long x = t & 0xffffffffu;

        if (!x)
                return 0;
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        return ia64_popcnt(x);
}
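
/*
 * How this works: the shift-or cascade smears the highest set bit into
 * every lower position, so popcnt then counts how many bit positions
 * lie at or below it.  Worked example: fls(9) takes x = 1001b, smears
 * it to 1111b, and ia64_popcnt(1111b) = 4.
 */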

/*
 * Find the last (most significant) bit set.  Undefined for x==0.
 * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
 */
static inline unsigned long
__fls (unsigned long x)
{
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        x |= x >> 32;
        return ia64_popcnt(x) - 1;
}
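
/*
 * Same smearing trick as fls() above, extended with the "x >> 32" step
 * to cover all 64 bits; subtracting 1 converts the 1..64 count into the
 * 0..63 bit numbering (e.g., __fls(9) = popcnt(1111b) - 1 = 3).
 */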

#include <asm-generic/bitops/fls64.h>

/*
 * ffs: find first bit set.  This is defined the same way as the libc and
 * compiler builtin ffs routines, therefore differs in spirit from the
 * above ffz (man ffs): it operates on "int" values only and the result
 * value is the bit number + 1.  ffs(0) is defined to return zero.
 */
#define ffs(x)  __builtin_ffs(x)

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
static __inline__ unsigned long
hweight64 (unsigned long x)
{
        unsigned long result;
        result = ia64_popcnt(x);
        return result;
}
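
/*
 * Example: hweight64(0xf0f0) = 8, since eight bits are set.  The narrower
 * variants below simply mask the argument first, e.g. hweight8(0x1ff)
 * counts only the low eight bits and returns 8.
 */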

#define hweight32(x)    (unsigned int) hweight64((x) & 0xfffffffful)
#define hweight16(x)    (unsigned int) hweight64((x) & 0xfffful)
#define hweight8(x)     (unsigned int) hweight64((x) & 0xfful)

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(l,n,a)      test_and_set_bit(n,a)
#define ext2_clear_bit_atomic(l,n,a)    test_and_clear_bit(n,a)

#include <asm-generic/bitops/minix.h>
#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */