linux/arch/x86/include/asm/uaccess.h
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.addr_limit)
#define set_fs(x)       (current->thread.addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr)         \
        ((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        /*
         * If we have used "sizeof()" for the size,
         * we know it won't overflow the limit (but
         * it might overflow the 'addr', so it's
         * important to subtract the size from the
         * limit, not add it to the address).
         */
        if (__builtin_constant_p(size))
                return unlikely(addr > limit - size);

        /* Arbitrary sizes? Be careful about overflow */
        addr += size;
        if (unlikely(addr < size))
                return true;
        return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
        __chk_user_ptr(addr);                                           \
        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
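
/*
 * Editor's illustration (not part of the original header): why the
 * constant-size path subtracts the size from the limit instead of
 * adding it to the address.  Assume a hostile addr near the top of the
 * address space and a small sizeof()-derived size:
 *
 *	addr = 0xfffffffffffffff8UL;	(hypothetical user pointer)
 *	size = 16;
 *
 * "addr + size" wraps around to 0x8 and would slip past a naive
 * "addr + size > limit" test, while "addr > limit - size" still rejects
 * it, because subtracting a small constant from the limit can never
 * wrap.  The non-constant path instead adds and then detects the wrap
 * explicitly with "if (addr < size)".
 */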

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) \
        likely(!__range_not_ok(addr, size, user_addr_max()))
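
/*
 * Editor's sketch (not in the original header): a typical access_ok()
 * check before using the unchecked __get_user() variant.  The ioctl
 * handler, 'arg' and 'argp' names are hypothetical.
 *
 *	static long example_ioctl(struct file *f, unsigned int cmd,
 *				  unsigned long arg)
 *	{
 *		int __user *argp = (int __user *)arg;
 *		int val;
 *
 *		if (!access_ok(VERIFY_READ, argp, sizeof(*argp)))
 *			return -EFAULT;
 *		if (__get_user(val, argp))
 *			return -EFAULT;
 *		return val ? 0 : -EINVAL;
 *	}
 */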

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
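
/*
 * Editor's note (illustrative): on a 64-bit build, __inttype(int) and
 * __inttype(u64) are both unsigned long; on a 32-bit build,
 * __inttype(u64) becomes unsigned long long while __inttype(int) stays
 * unsigned long, so the temporary below is always wide enough.
 */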

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)                                                \
({                                                                      \
        int __ret_gu;                                                   \
        register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);            \
        register void *__sp asm(_ASM_SP);                               \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
        asm volatile("call __get_user_%P4"                              \
                     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)    \
                     : "0" (ptr), "i" (sizeof(*(ptr))));                \
        (x) = (__force __typeof__(*(ptr))) __val_gu;                    \
        __builtin_expect(__ret_gu, 0);                                  \
})
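
/*
 * Editor's sketch (not in the original header): a minimal get_user()
 * call from a hypothetical helper.  Unlike __get_user(), get_user()
 * performs its own range check, so no prior access_ok() is required.
 *
 *	static int read_user_flag(int __user *uflag)
 *	{
 *		int flag;
 *
 *		if (get_user(flag, uflag))
 *			return -EFAULT;
 *		return flag;
 *	}
 */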

#define __put_user_x(size, x, ptr, __ret_pu)                    \
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
                     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)                        \
        asm volatile("\n"                                               \
                     "1:        movl %%eax,0(%2)\n"                     \
                     "2:        movl %%edx,4(%2)\n"                     \
                     "3:"                                               \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        movl %3,%0\n"                           \
                     "  jmp 3b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (err)                                       \
                     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)                                  \
        asm volatile("\n"                                               \
                     "1:        movl %%eax,0(%1)\n"                     \
                     "2:        movl %%edx,4(%1)\n"                     \
                     "3:"                                               \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     _ASM_EXTABLE_EX(2b, 3b)                            \
                     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)                         \
        asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
                     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
        __put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)  \
        __put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)                                        \
({                                                              \
        int __ret_pu;                                           \
        __typeof__(*(ptr)) __pu_val;                            \
        __chk_user_ptr(ptr);                                    \
        might_fault();                                          \
        __pu_val = x;                                           \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
                __put_user_x(1, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 8:                                                 \
                __put_user_x8(__pu_val, ptr, __ret_pu);         \
                break;                                          \
        default:                                                \
                __put_user_x(X, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        }                                                       \
        __builtin_expect(__ret_pu, 0);                          \
})
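
/*
 * Editor's sketch (not in the original header): the mirror image of the
 * get_user() example above, writing a single value back to a
 * hypothetical user pointer.
 *
 *	static int write_user_flag(int __user *uflag, int flag)
 *	{
 *		if (put_user(flag, uflag))
 *			return -EFAULT;
 *		return 0;
 *	}
 */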

#define __put_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,  \
                                   errret);                             \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm_ex(x, ptr, "b", "b", "iq");              \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm_ex(x, ptr, "w", "w", "ir");              \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm_ex(x, ptr, "l", "k", "ir");              \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);      \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)                      \
({                                                                      \
        __typeof__(ptr) __ptr = (ptr);                                  \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:        movl %2,%%eax\n"                        \
                     "2:        movl %3,%%edx\n"                        \
                     "3: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        mov %4,%0\n"                            \
                     "  xorl %%eax,%%eax\n"                             \
                     "  xorl %%edx,%%edx\n"                             \
                     "  jmp 3b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (retval), "=A"(x)                           \
                     : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \
                       "i" (errret), "0" (retval));                     \
})

#define __get_user_asm_ex_u64(x, ptr)                   (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
         __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
         __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_u64(x, ptr, retval, errret);             \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("\n"                                               \
                     "1:        mov"itype" %2,%"rtype"1\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype(x)                             \
                     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm_ex(x, ptr, "b", "b", "=q");              \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm_ex(x, ptr, "w", "w", "=r");              \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm_ex(x, ptr, "l", "k", "=r");              \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_ex_u64(x, ptr);                          \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:xor"itype" %"rtype"0,%"rtype"0\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE_EX(1b, 3b)                            \
                     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        int __pu_err;                                           \
        __uaccess_begin();                                      \
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
        __uaccess_end();                                        \
        __builtin_expect(__pu_err, 0);                          \
})

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
        __inttype(*(ptr)) __gu_val;                                     \
        __uaccess_begin();                                              \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        __uaccess_end();                                                \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __builtin_expect(__gu_err, 0);                                  \
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("\n"                                               \
                     "1:        mov"itype" %"rtype"1,%2\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try     do {                                            \
        current->thread.uaccess_err = 0;                                \
        __uaccess_begin();                                              \
        barrier();

#define uaccess_catch(err)                                              \
        __uaccess_end();                                                \
        (err) |= (current->thread.uaccess_err ? -EFAULT : 0);           \
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)                                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
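
/*
 * Editor's sketch (not in the original header): the access_ok() +
 * __get_user()/__put_user() pattern for multiple accesses to the same
 * user structure.  The structure and function names are hypothetical;
 * VERIFY_WRITE is used because it is a superset of VERIFY_READ.
 *
 *	struct example_req { int in; int out; };
 *
 *	static int handle_req(struct example_req __user *ureq)
 *	{
 *		int in;
 *
 *		if (!access_ok(VERIFY_WRITE, ureq, sizeof(*ureq)))
 *			return -EFAULT;
 *		if (__get_user(in, &ureq->in))
 *			return -EFAULT;
 *		if (__put_user(in + 1, &ureq->out))
 *			return -EFAULT;
 *		return 0;
 *	}
 */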

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *      get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try            uaccess_try
#define get_user_catch(err)     uaccess_catch(err)

#define get_user_ex(x, ptr)     do {                                    \
        unsigned long __gue_val;                                        \
        __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
        (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
} while (0)

#define put_user_try            uaccess_try
#define put_user_catch(err)     uaccess_catch(err)

#define put_user_ex(x, ptr)                                             \
        __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
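
/*
 * Editor's sketch (not in the original header), expanding the usage
 * skeleton above: several put_user_ex() stores batched between
 * put_user_try/put_user_catch so a single error value is checked once
 * at the end.  'uframe' and its fields are hypothetical, and an
 * access_ok() check on uframe must already have been done.
 *
 *	int err = 0;
 *
 *	put_user_try {
 *		put_user_ex(a, &uframe->a);
 *		put_user_ex(b, &uframe->b);
 *	} put_user_catch(err);
 *
 *	if (err)
 *		return -EFAULT;
 */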

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
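
/*
 * Editor's note (illustrative, assuming the generic strncpy_from_user()
 * semantics): the return value is the string length on success, a
 * negative errno on fault, and 'count' itself when the source string
 * did not fit, in which case the destination is not NUL-terminated and
 * truncation must be handled explicitly.  'uname' is hypothetical.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */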

extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)       \
({                                                                      \
        int __ret = 0;                                                  \
        __typeof__(ptr) __uval = (uval);                                \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
        __uaccess_begin();                                              \
        switch (size) {                                                 \
        case 1:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "q" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 2:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 4:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 8:                                                         \
        {                                                               \
                if (!IS_ENABLED(CONFIG_X86_64))                         \
                        __cmpxchg_wrong_size();                         \
                                                                        \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
        __uaccess_end();                                                \
        *__uval = __old;                                                \
        __ret;                                                          \
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)               \
({                                                                      \
        access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?                \
                __user_atomic_cmpxchg_inatomic((uval), (ptr),           \
                                (old), (new), sizeof(*(ptr))) :         \
                -EFAULT;                                                \
})
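
/*
 * Editor's sketch (not in the original header): a futex-style
 * compare-and-swap on a user word.  'uaddr' is a hypothetical
 * u32 __user pointer; being an "inatomic" helper, the caller must
 * disable page faults or be prepared to handle -EFAULT itself.
 *
 *	u32 cur;
 *	int ret = user_atomic_cmpxchg_inatomic(&cur, uaddr, 0, 1);
 *
 *	if (ret)
 *		return ret;
 *	if (cur != 0)
 *		return -EAGAIN;
 *
 * ret is -EFAULT for a bad or faulting address; cur receives the value
 * actually found at *uaddr, so a nonzero cur means the cmpxchg lost.
 */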

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
        int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
                                           unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
                                         unsigned n);

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        int sz = __compiletime_object_size(to);

        might_fault();

        kasan_check_write(to, n);

        if (likely(sz < 0 || sz >= n)) {
                check_object_size(to, n, false);
                n = _copy_from_user(to, from, n);
        } else if (!__builtin_constant_p(n))
                copy_user_overflow(sz, n);
        else
                __bad_copy_user();

        return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        int sz = __compiletime_object_size(from);

        kasan_check_read(from, n);

        might_fault();

        if (likely(sz < 0 || sz >= n)) {
                check_object_size(from, n, true);
                n = _copy_to_user(to, from, n);
        } else if (!__builtin_constant_p(n))
                copy_user_overflow(sz, n);
        else
                __bad_copy_user();

        return n;
}
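
/*
 * Editor's sketch (not in the original header): copy_from_user() and
 * copy_to_user() return the number of bytes that could NOT be copied
 * (0 on complete success), so the usual idiom turns any nonzero return
 * into -EFAULT.  The struct and pointer names are hypothetical.
 *
 *	struct example_cfg cfg;
 *
 *	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
 *		return -EFAULT;
 *	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
 *		return -EFAULT;
 *	return 0;
 */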

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()     __uaccess_begin()
#define user_access_end()       __uaccess_end()

#define unsafe_put_user(x, ptr, err_label)                                      \
do {                                                                            \
        int __pu_err;                                                           \
        __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);         \
        if (unlikely(__pu_err)) goto err_label;                                 \
} while (0)

#define unsafe_get_user(x, ptr, err_label)                                      \
do {                                                                            \
        int __gu_err;                                                           \
        unsigned long __gu_val;                                                 \
        __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);    \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
        if (unlikely(__gu_err)) goto err_label;                                 \
} while (0)
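
/*
 * Editor's sketch (not in the original header): the required shape of
 * an unsafe_get_user()/unsafe_put_user() sequence per the comment
 * above: access_ok() first, then the user_access_begin()/end() pair,
 * with an error label for the fault case.  'uptr' is a hypothetical
 * u32 __user pointer.
 *
 *	u32 lo;
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *
 *	user_access_begin();
 *	unsafe_get_user(lo, &uptr[0], efault);
 *	unsafe_put_user(lo + 1, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 *
 *	efault:
 *	user_access_end();
 *	return -EFAULT;
 */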

#endif /* _ASM_X86_UACCESS_H */