linux/arch/mips/include/asm/atomic.h
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/llsc.h>
#include <asm/sync.h>
#include <asm/war.h>

#define ATOMIC_OPS(pfx, type)                                           \
static __always_inline type arch_##pfx##_read(const pfx##_t *v)         \
{                                                                       \
        return READ_ONCE(v->counter);                                   \
}                                                                       \
                                                                        \
static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)        \
{                                                                       \
        WRITE_ONCE(v->counter, i);                                      \
}                                                                       \
                                                                        \
static __always_inline type                                             \
arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n)                        \
{                                                                       \
        return arch_cmpxchg(&v->counter, o, n);                         \
}                                                                       \
                                                                        \
static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n)       \
{                                                                       \
        return arch_xchg(&v->counter, n);                               \
}

ATOMIC_OPS(atomic, int)

#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i)       { (i) }
ATOMIC_OPS(atomic64, s64)
#endif
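
/*
 * For illustration: ATOMIC_OPS(atomic, int) above roughly expands to plain
 * READ_ONCE()/WRITE_ONCE() based accessors plus cmpxchg/xchg wrappers, e.g.
 *
 *      static __always_inline int arch_atomic_read(const atomic_t *v)
 *      {
 *              return READ_ONCE(v->counter);
 *      }
 *
 *      static __always_inline void arch_atomic_set(atomic_t *v, int i)
 *      {
 *              WRITE_ONCE(v->counter, i);
 *      }
 *
 * and likewise arch_atomic_cmpxchg()/arch_atomic_xchg() forwarding to
 * arch_cmpxchg()/arch_xchg() on &v->counter.
 */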

#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)                  \
static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)           \
{                                                                       \
        type temp;                                                      \
                                                                        \
        if (!kernel_uses_llsc) {                                        \
                unsigned long flags;                                    \
                                                                        \
                raw_local_irq_save(flags);                              \
                v->counter c_op i;                                      \
                raw_local_irq_restore(flags);                           \
                return;                                                 \
        }                                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " __SYNC(full, loongson3_war) "                 \n"     \
        "1:     " #ll " %0, %1          # " #pfx "_" #op "      \n"     \
        "       " #asm_op " %0, %2                              \n"     \
        "       " #sc " %0, %1                                  \n"     \
        "\t" __SC_BEQZ "%0, 1b                                  \n"     \
        "       .set    pop                                     \n"     \
        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)            \
        : "Ir" (i) : __LLSC_CLOBBER);                                   \
}
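
/*
 * Sketch of what ATOMIC_OP() generates, assuming LL/SC is used: e.g.
 * ATOMIC_OP(atomic, add, int, +=, addu, ll, sc) yields arch_atomic_add(),
 * whose loop is roughly equivalent to
 *
 *      do {
 *              temp = v->counter;      // ll: load-linked
 *              temp += i;              // addu
 *      } while (!store_conditional(&v->counter, temp));  // sc; beqz retries
 *
 * where store_conditional() is only illustrative shorthand for sc, which
 * fails (writing 0 to its register) if the location was written between
 * the ll and the sc.
 */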

#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)           \
static __inline__ type                                                  \
arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)                 \
{                                                                       \
        type temp, result;                                              \
                                                                        \
        if (!kernel_uses_llsc) {                                        \
                unsigned long flags;                                    \
                                                                        \
                raw_local_irq_save(flags);                              \
                result = v->counter;                                    \
                result c_op i;                                          \
                v->counter = result;                                    \
                raw_local_irq_restore(flags);                           \
                return result;                                          \
        }                                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " __SYNC(full, loongson3_war) "                 \n"     \
        "1:     " #ll " %1, %2          # " #pfx "_" #op "_return\n"    \
        "       " #asm_op " %0, %1, %3                          \n"     \
        "       " #sc " %0, %2                                  \n"     \
        "\t" __SC_BEQZ "%0, 1b                                  \n"     \
        "       " #asm_op " %0, %1, %3                          \n"     \
        "       .set    pop                                     \n"     \
        : "=&r" (result), "=&r" (temp),                                 \
          "+" GCC_OFF_SMALL_ASM() (v->counter)                          \
        : "Ir" (i) : __LLSC_CLOBBER);                                   \
                                                                        \
        return result;                                                  \
}
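
/*
 * Note on the second #asm_op in ATOMIC_OP_RETURN() above: sc reuses %0 to
 * report success or failure, clobbering the value computed inside the loop.
 * Once the loop exits, the operation is therefore redone on the ll'd old
 * value in %1, e.g. for arch_atomic_add_return_relaxed() roughly
 *
 *      result = temp + i;      // addu %0, %1, %3 after a successful sc
 *      return result;
 */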

#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)            \
static __inline__ type                                                  \
arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)                  \
{                                                                       \
        type temp, result;                                              \
                                                                        \
        if (!kernel_uses_llsc) {                                        \
                unsigned long flags;                                    \
                                                                        \
                raw_local_irq_save(flags);                              \
                result = v->counter;                                    \
                v->counter c_op i;                                      \
                raw_local_irq_restore(flags);                           \
                return result;                                          \
        }                                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " __SYNC(full, loongson3_war) "                 \n"     \
        "1:     " #ll " %1, %2          # " #pfx "_fetch_" #op "\n"     \
        "       " #asm_op " %0, %1, %3                          \n"     \
        "       " #sc " %0, %2                                  \n"     \
        "\t" __SC_BEQZ "%0, 1b                                  \n"     \
        "       .set    pop                                     \n"     \
        "       move    %0, %1                                  \n"     \
        : "=&r" (result), "=&r" (temp),                                 \
          "+" GCC_OFF_SMALL_ASM() (v->counter)                          \
        : "Ir" (i) : __LLSC_CLOBBER);                                   \
                                                                        \
        return result;                                                  \
}
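
/*
 * The fetch variants return the value the counter held before the update;
 * arch_atomic_fetch_add_relaxed(i, v) is roughly the atomic equivalent of
 *
 *      old = v->counter;
 *      v->counter = old + i;
 *      return old;
 *
 * which is why the asm above ends with "move %0, %1", handing back the
 * ll'd old value rather than the updated one.
 */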

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)                 \
        ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)                  \
        ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)           \
        ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define arch_atomic_add_return_relaxed  arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed  arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed   arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed   arch_atomic_fetch_sub_relaxed
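
/*
 * Defining each arch_atomic_*_relaxed macro to itself advertises to
 * <linux/atomic.h> that this architecture supplies the relaxed form; the
 * generic layer then builds the acquire/release/fully-ordered variants from
 * it by adding the appropriate barriers.
 */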

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define arch_atomic64_add_return_relaxed       arch_atomic64_add_return_relaxed
# define arch_atomic64_sub_return_relaxed       arch_atomic64_sub_return_relaxed
# define arch_atomic64_fetch_add_relaxed        arch_atomic64_fetch_add_relaxed
# define arch_atomic64_fetch_sub_relaxed        arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)                 \
        ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)                  \
        ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define arch_atomic_fetch_and_relaxed   arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed    arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed   arch_atomic_fetch_xor_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define arch_atomic64_fetch_and_relaxed        arch_atomic64_fetch_and_relaxed
# define arch_atomic64_fetch_or_relaxed         arch_atomic64_fetch_or_relaxed
# define arch_atomic64_fetch_xor_relaxed        arch_atomic64_fetch_xor_relaxed
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc)                            \
static __inline__ type arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
{                                                                       \
        type temp, result;                                              \
                                                                        \
        smp_mb__before_atomic();                                        \
                                                                        \
        if (!kernel_uses_llsc) {                                        \
                unsigned long flags;                                    \
                                                                        \
                raw_local_irq_save(flags);                              \
                result = v->counter;                                    \
                result -= i;                                            \
                if (result >= 0)                                        \
                        v->counter = result;                            \
                raw_local_irq_restore(flags);                           \
                smp_mb__after_atomic();                                 \
                return result;                                          \
        }                                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " __SYNC(full, loongson3_war) "                 \n"     \
        "1:     " #ll " %1, %2          # atomic_sub_if_positive\n"     \
        "       .set    pop                                     \n"     \
        "       " #op " %0, %1, %3                              \n"     \
        "       move    %1, %0                                  \n"     \
        "       bltz    %0, 2f                                  \n"     \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " #sc " %1, %2                                  \n"     \
        "       " __SC_BEQZ "%1, 1b                             \n"     \
        "2:     " __SYNC(full, loongson3_war) "                 \n"     \
        "       .set    pop                                     \n"     \
        : "=&r" (result), "=&r" (temp),                                 \
          "+" GCC_OFF_SMALL_ASM() (v->counter)                          \
        : "Ir" (i)                                                      \
        : __LLSC_CLOBBER);                                              \
                                                                        \
        /*                                                              \
         * In the Loongson3 workaround case we already have a           \
         * completion barrier at 2: above, which is needed due to the   \
         * bltz that can branch to code outside of the LL/SC loop. As   \
         * such, we don't need to emit another barrier here.            \
         */                                                             \
        if (__SYNC_loongson3_war == 0)                                  \
                smp_mb__after_atomic();                                 \
                                                                        \
        return result;                                                  \
}

ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define arch_atomic_dec_if_positive(v)  arch_atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define arch_atomic64_dec_if_positive(v)        arch_atomic64_sub_if_positive(1, v)
#endif
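
/*
 * Hypothetical usage sketch (the names below are illustrative only):
 * dec_if_positive suits countdown-style budgets, e.g.
 *
 *      static atomic_t budget = ATOMIC_INIT(4);
 *
 *      if (arch_atomic_dec_if_positive(&budget) < 0)
 *              return -EBUSY;  // already exhausted; counter left unchanged
 *
 * Callers normally reach this through atomic_dec_if_positive() from
 * <linux/atomic.h> rather than the arch_ prefixed form.
 */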

#undef ATOMIC_SIP_OP

#endif /* _ASM_ATOMIC_H */