#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.addr_limit)
#define set_fs(x)       (current->thread.addr_limit = (x))
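
/*
 * Illustrative sketch (caller and names hypothetical): the classic
 * pattern for temporarily widening the address limit so a kernel
 * buffer can be handed to code expecting a __user pointer; the old
 * limit must always be restored:
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      ret = helper_that_takes_user_ptr((void __user *)kernel_buf, len);
 *      set_fs(old_fs);
 */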

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr)         \
        ((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        /*
         * If we have used "sizeof()" for the size,
         * we know it won't overflow the limit (but
         * it might overflow the 'addr', so it's
         * important to subtract the size from the
         * limit, not add it to the address).
         */
        if (__builtin_constant_p(size))
                return unlikely(addr > limit - size);

        /* Arbitrary sizes? Be careful about overflow */
        addr += size;
        if (unlikely(addr < size))
                return true;
        return unlikely(addr > limit);
}
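
/*
 * Worked example of the constant-size fast path above: a "sizeof()"
 * size can never exceed the limit, so "limit - size" cannot wrap,
 * while "addr + size" could.  With a hypothetical addr near the top
 * of the address space:
 *
 *      __chk_range_not_ok(-8UL, sizeof(u64), TASK_SIZE_MAX)
 *              -> (-8UL > TASK_SIZE_MAX - 8) -> true, range rejected
 *
 * even though "addr + size" would have wrapped around to 0.
 */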

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
        __chk_user_ptr(addr);                                           \
        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()       WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size)                                     \
({                                                                      \
        WARN_ON_IN_IRQ();                                               \
        likely(!__range_not_ok(addr, size, user_addr_max()));           \
})
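
/*
 * Typical use (sketch, hypothetical 'uaddr'/'len'): validate the range
 * once before using the unchecked "__" accessors below on it:
 *
 *      if (!access_ok(VERIFY_WRITE, uaddr, len))
 *              return -EFAULT;
 */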

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
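
/*
 * For example, __inttype(*(u16 __user *)p) is unsigned long, while on
 * 32-bit kernels __inttype(*(u64 __user *)p) is unsigned long long,
 * since a u64 no longer fits in unsigned long there.
 */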

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)                                                \
({                                                                      \
        int __ret_gu;                                                   \
        register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);            \
        register void *__sp asm(_ASM_SP);                               \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
        asm volatile("call __get_user_%P4"                              \
                     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)    \
                     : "0" (ptr), "i" (sizeof(*(ptr))));                \
        (x) = (__force __typeof__(*(ptr))) __val_gu;                    \
        __builtin_expect(__ret_gu, 0);                                  \
})
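
/*
 * Example (sketch, hypothetical 'arg'): the access size and result type
 * are inferred from the pointer, so a plain call suffices:
 *
 *      u32 val;
 *
 *      if (get_user(val, (u32 __user *)arg))
 *              return -EFAULT;
 */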

#define __put_user_x(size, x, ptr, __ret_pu)                    \
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
                     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)                        \
        asm volatile("\n"                                               \
                     "1:        movl %%eax,0(%2)\n"                     \
                     "2:        movl %%edx,4(%2)\n"                     \
                     "3:"                                               \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        movl %3,%0\n"                           \
                     "  jmp 3b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (err)                                       \
                     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)                                  \
        asm volatile("\n"                                               \
                     "1:        movl %%eax,0(%1)\n"                     \
                     "2:        movl %%edx,4(%1)\n"                     \
                     "3:"                                               \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     _ASM_EXTABLE_EX(2b, 3b)                            \
                     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)                         \
        asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
                     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
        __put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)  \
        __put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)                                        \
({                                                              \
        int __ret_pu;                                           \
        __typeof__(*(ptr)) __pu_val;                            \
        __chk_user_ptr(ptr);                                    \
        might_fault();                                          \
        __pu_val = x;                                           \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
                __put_user_x(1, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 8:                                                 \
                __put_user_x8(__pu_val, ptr, __ret_pu);         \
                break;                                          \
        default:                                                \
                __put_user_x(X, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        }                                                       \
        __builtin_expect(__ret_pu, 0);                          \
})
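
/*
 * Example (sketch, hypothetical 'status'/'uresult'):
 *
 *      if (put_user(status, uresult))
 *              return -EFAULT;
 */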

#define __put_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,  \
                                   errret);                             \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm_ex(x, ptr, "b", "b", "iq");              \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm_ex(x, ptr, "w", "w", "ir");              \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm_ex(x, ptr, "l", "k", "ir");              \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);      \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)                      \
({                                                                      \
        __typeof__(ptr) __ptr = (ptr);                                  \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:        movl %2,%%eax\n"                        \
                     "2:        movl %3,%%edx\n"                        \
                     "3: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        mov %4,%0\n"                            \
                     "  xorl %%eax,%%eax\n"                             \
                     "  xorl %%edx,%%edx\n"                             \
                     "  jmp 3b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (retval), "=A"(x)                           \
                     : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \
                       "i" (errret), "0" (retval));                     \
})

#define __get_user_asm_ex_u64(x, ptr)                   (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
         __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
         __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_u64(x, ptr, retval, errret);             \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("\n"                                               \
                     "1:        mov"itype" %2,%"rtype"1\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype(x)                             \
                     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm_ex(x, ptr, "b", "b", "=q");              \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm_ex(x, ptr, "w", "w", "=r");              \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm_ex(x, ptr, "l", "k", "=r");              \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_ex_u64(x, ptr);                          \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:xor"itype" %"rtype"0,%"rtype"0\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE_EX(1b, 3b)                            \
                     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        int __pu_err;                                           \
        __uaccess_begin();                                      \
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
        __uaccess_end();                                        \
        __builtin_expect(__pu_err, 0);                          \
})

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
        __inttype(*(ptr)) __gu_val;                                     \
        __uaccess_begin();                                              \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        __uaccess_end();                                                \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __builtin_expect(__gu_err, 0);                                  \
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("\n"                                               \
                     "1:        mov"itype" %"rtype"1,%2\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try     do {                                            \
        current->thread.uaccess_err = 0;                                \
        __uaccess_begin();                                              \
        barrier();

#define uaccess_catch(err)                                              \
        __uaccess_end();                                                \
        (err) |= (current->thread.uaccess_err ? -EFAULT : 0);           \
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)                                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
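
/*
 * Example (sketch, hypothetical 'ureq'): one access_ok() covering the
 * whole structure, then unchecked per-field fetches:
 *
 *      if (!access_ok(VERIFY_READ, ureq, sizeof(*ureq)))
 *              return -EFAULT;
 *      err  = __get_user(cmd, &ureq->cmd);
 *      err |= __get_user(flags, &ureq->flags);
 *      if (err)
 *              return -EFAULT;
 */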

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
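
/*
 * Example (sketch), mirroring the __get_user() pattern above for a
 * result structure already covered by a single access_ok() check:
 *
 *      err  = __put_user(res.lo, &ures->lo);
 *      err |= __put_user(res.hi, &ures->hi);
 */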

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *      get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try            uaccess_try
#define get_user_catch(err)     uaccess_catch(err)

#define get_user_ex(x, ptr)     do {                                    \
        unsigned long __gue_val;                                        \
        __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
        (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
} while (0)

#define put_user_try            uaccess_try
#define put_user_catch(err)     uaccess_catch(err)

#define put_user_ex(x, ptr)                                             \
        __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
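
/*
 * Example (sketch) of the try/catch form as used for signal frame
 * setup; 'frame' is a hypothetical __user pointer already checked
 * with access_ok():
 *
 *      int err = 0;
 *
 *      put_user_try {
 *              put_user_ex(regs->ip, &frame->ip);
 *              put_user_ex(regs->sp, &frame->sp);
 *      } put_user_catch(err);
 *
 *      if (err)
 *              return -EFAULT;
 */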

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)       \
({                                                                      \
        int __ret = 0;                                                  \
        __typeof__(ptr) __uval = (uval);                                \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
        __uaccess_begin();                                              \
        switch (size) {                                                 \
        case 1:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "q" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 2:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 4:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 8:                                                         \
        {                                                               \
                if (!IS_ENABLED(CONFIG_X86_64))                         \
                        __cmpxchg_wrong_size();                         \
                                                                        \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
        __uaccess_end();                                                \
        *__uval = __old;                                                \
        __ret;                                                          \
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)               \
({                                                                      \
        access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?                \
                __user_atomic_cmpxchg_inatomic((uval), (ptr),           \
                                (old), (new), sizeof(*(ptr))) :         \
                -EFAULT;                                                \
})
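
/*
 * Example (sketch, hypothetical futex-style word at 'uaddr'): compare
 * and swap atomically against user memory; on success 'cur' holds the
 * value that was found there:
 *
 *      u32 cur;
 *      int ret;
 *
 *      ret = user_atomic_cmpxchg_inatomic(&cur, uaddr, expected, newval);
 *      if (ret)
 *              return ret;             /* -EFAULT */
 *      if (cur != expected)
 *              return -EAGAIN;         /* lost the race */
 */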

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
        int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
                                           unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
                                         unsigned n);

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        int sz = __compiletime_object_size(to);

        might_fault();

        kasan_check_write(to, n);

        if (likely(sz < 0 || sz >= n)) {
                check_object_size(to, n, false);
                n = _copy_from_user(to, from, n);
        } else if (!__builtin_constant_p(n))
                copy_user_overflow(sz, n);
        else
                __bad_copy_user();

        return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        int sz = __compiletime_object_size(from);

        kasan_check_read(from, n);

        might_fault();

        if (likely(sz < 0 || sz >= n)) {
                check_object_size(from, n, true);
                n = _copy_to_user(to, from, n);
        } else if (!__builtin_constant_p(n))
                copy_user_overflow(sz, n);
        else
                __bad_copy_user();

        return n;
}
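
/*
 * Example (sketch, hypothetical ioctl argument): whole-struct copies in
 * and out; a nonzero return value means the copy was short:
 *
 *      struct foo_args args;
 *
 *      if (copy_from_user(&args, uarg, sizeof(args)))
 *              return -EFAULT;
 *      ...
 *      if (copy_to_user(uarg, &args, sizeof(args)))
 *              return -EFAULT;
 */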

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()     __uaccess_begin()
#define user_access_end()       __uaccess_end()

#define unsafe_put_user(x, ptr, err_label)                                      \
do {                                                                            \
        int __pu_err;                                                           \
        __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);         \
        if (unlikely(__pu_err)) goto err_label;                                 \
} while (0)

#define unsafe_get_user(x, ptr, err_label)                                      \
do {                                                                            \
        int __gu_err;                                                           \
        unsigned long __gu_val;                                                 \
        __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);    \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
        if (unlikely(__gu_err)) goto err_label;                                 \
} while (0)
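
/*
 * Example (sketch, hypothetical 'uptr'): access_ok() first, then the
 * unsafe accessors bracketed by user_access_begin()/user_access_end();
 * the error label must end the user access window too:
 *
 *      if (!access_ok(VERIFY_WRITE, uptr, 2 * sizeof(u32)))
 *              return -EFAULT;
 *
 *      user_access_begin();
 *      unsafe_put_user(lo, &uptr[0], efault);
 *      unsafe_put_user(hi, &uptr[1], efault);
 *      user_access_end();
 *      return 0;
 * efault:
 *      user_access_end();
 *      return -EFAULT;
 */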

#endif /* _ASM_X86_UACCESS_H */