/* linux/arch/x86/include/asm/uaccess.h */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr)         \
        ((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns false (0) if the range is valid, true (nonzero) otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        /*
         * If we have used "sizeof()" for the size,
         * we know it won't overflow the limit (but
         * it might overflow the 'addr', so it's
         * important to subtract the size from the
         * limit, not add it to the address).
         */
        if (__builtin_constant_p(size))
                return unlikely(addr > limit - size);

        /* Arbitrary sizes? Be careful about overflow */
        addr += size;
        if (unlikely(addr < size))
                return true;
        return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
        __chk_user_ptr(addr);                                           \
        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) \
        likely(!__range_not_ok(addr, size, user_addr_max()))
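
/*
 * Illustrative sketch (not part of this header): a typical caller checks
 * the whole user range once with access_ok() before doing any unchecked
 * accesses to it.  The function and parameter names below are hypothetical.
 */
static inline int example_check_user_range(void __user *ubuf, unsigned long len)
{
        /* access_ok() only validates the range; it does not touch the memory */
        if (!access_ok(VERIFY_WRITE, ubuf, len))
                return -EFAULT;
        return 0;
}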

/*
 * The exception table consists of triples of addresses relative to the
 * exception table entry itself. The first address is of an instruction
 * that is allowed to fault, the second is the target at which the program
 * should continue. The third is a handler function to deal with the fault
 * caused by the instruction in the first field.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means that when everything goes
 * well, we don't even have to jump over them.  Further, they do not
 * intrude on our cache or tlb entries.
 */

struct exception_table_entry {
        int insn, fixup, handler;
};

#define ARCH_HAS_RELATIVE_EXTABLE

#define swap_ex_entry_fixup(a, b, tmp, delta)                   \
        do {                                                    \
                (a)->fixup = (b)->fixup + (delta);              \
                (b)->fixup = (tmp).fixup - (delta);             \
                (a)->handler = (b)->handler + (delta);          \
                (b)->handler = (tmp).handler - (delta);         \
        } while (0)
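
/*
 * Illustrative sketch: each 32-bit field holds the offset from the field
 * itself to the absolute address it refers to, so an entry is decoded by
 * adding the field's own address back in.  The helper name is hypothetical;
 * the kernel's real decoding lives in arch/x86/mm/extable.c.
 */
static inline unsigned long example_ex_insn_addr(const struct exception_table_entry *e)
{
        /* &e->insn + e->insn yields the absolute address of the faulting insn */
        return (unsigned long)&e->insn + e->insn;
}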

extern int fixup_exception(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr);

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)                                                \
({                                                                      \
        int __ret_gu;                                                   \
        register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);            \
        register void *__sp asm(_ASM_SP);                               \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
        asm volatile("call __get_user_%P4"                              \
                     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)    \
                     : "0" (ptr), "i" (sizeof(*(ptr))));                \
        (x) = (__force __typeof__(*(ptr))) __val_gu;                    \
        __builtin_expect(__ret_gu, 0);                                  \
})
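
/*
 * Illustrative sketch: get_user() infers the access size from the pointer
 * type and returns 0 or -EFAULT.  The function name below is hypothetical.
 */
static inline int example_read_user_int(int *dst, int __user *uptr)
{
        /* on a faulting access, *dst is set to zero and -EFAULT is returned */
        return get_user(*dst, uptr);
}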

#define __put_user_x(size, x, ptr, __ret_pu)                    \
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
                     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)                        \
        asm volatile("\n"                                               \
                     "1:        movl %%eax,0(%2)\n"                     \
                     "2:        movl %%edx,4(%2)\n"                     \
                     "3:"                                               \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        movl %3,%0\n"                           \
                     "  jmp 3b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (err)                                       \
                     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)                                  \
        asm volatile("\n"                                               \
                     "1:        movl %%eax,0(%1)\n"                     \
                     "2:        movl %%edx,4(%1)\n"                     \
                     "3:"                                               \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     _ASM_EXTABLE_EX(2b, 3b)                            \
                     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)                         \
        asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
                     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
        __put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)  \
        __put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)                                        \
({                                                              \
        int __ret_pu;                                           \
        __typeof__(*(ptr)) __pu_val;                            \
        __chk_user_ptr(ptr);                                    \
        might_fault();                                          \
        __pu_val = x;                                           \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
                __put_user_x(1, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 8:                                                 \
                __put_user_x8(__pu_val, ptr, __ret_pu);         \
                break;                                          \
        default:                                                \
                __put_user_x(X, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        }                                                       \
        __builtin_expect(__ret_pu, 0);                          \
})
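
/*
 * Illustrative sketch: put_user() is the checked single-value counterpart
 * for stores.  The function name below is hypothetical.
 */
static inline int example_write_user_int(int __user *uptr, int val)
{
        /* returns 0 on success, -EFAULT if the store faulted */
        return put_user(val, uptr);
}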

#define __put_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,  \
                                   errret);                             \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm_ex(x, ptr, "b", "b", "iq");              \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm_ex(x, ptr, "w", "w", "ir");              \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm_ex(x, ptr, "l", "k", "ir");              \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);      \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)      (x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr)                   (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
         __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
         __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_u64(x, ptr, retval, errret);             \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("\n"                                               \
                     "1:        mov"itype" %2,%"rtype"1\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype(x)                             \
                     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm_ex(x, ptr, "b", "b", "=q");              \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm_ex(x, ptr, "w", "w", "=r");              \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm_ex(x, ptr, "l", "k", "=r");              \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_ex_u64(x, ptr);                          \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        int __pu_err;                                           \
        __uaccess_begin();                                      \
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
        __uaccess_end();                                        \
        __builtin_expect(__pu_err, 0);                          \
})

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
        unsigned long __gu_val;                                         \
        __uaccess_begin();                                              \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        __uaccess_end();                                                \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __builtin_expect(__gu_err, 0);                                  \
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("\n"                                               \
                     "1:        mov"itype" %"rtype"1,%2\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try     do {                                            \
        current_thread_info()->uaccess_err = 0;                         \
        __uaccess_begin();                                              \
        barrier();

#define uaccess_catch(err)                                              \
        __uaccess_end();                                                \
        (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);    \
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)                                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
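
/*
 * Illustrative sketch: with a single access_ok() check up front, the
 * unchecked __get_user()/__put_user() variants can be used for the
 * remaining accesses to the same user memory.  The struct and function
 * names below are hypothetical.
 */
struct example_pair {
        int in;
        int out;
};

static inline int example_double_field(struct example_pair __user *uarg)
{
        int val;

        /* VERIFY_WRITE also covers the read of ->in */
        if (!access_ok(VERIFY_WRITE, uarg, sizeof(*uarg)))
                return -EFAULT;
        if (__get_user(val, &uarg->in))
                return -EFAULT;
        return __put_user(val * 2, &uarg->out);
}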

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *      get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try            uaccess_try
#define get_user_catch(err)     uaccess_catch(err)

#define get_user_ex(x, ptr)     do {                                    \
        unsigned long __gue_val;                                        \
        __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
        (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
} while (0)

#define put_user_try            uaccess_try
#define put_user_catch(err)     uaccess_catch(err)

#define put_user_ex(x, ptr)                                             \
        __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
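
/*
 * Illustrative sketch of the try/catch pattern: several *_ex() accesses
 * share a single fault check, keeping per-access error tests off the fast
 * path.  The caller must already have done access_ok() on the range.  The
 * function name below is hypothetical.
 */
static inline int example_read_two_ints(int __user *usrc, int *a, int *b)
{
        int err = 0;

        get_user_try {
                get_user_ex(*a, &usrc[0]);
                get_user_ex(*b, &usrc[1]);
        } get_user_catch(err);

        return err;
}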

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)       \
({                                                                      \
        int __ret = 0;                                                  \
        __typeof__(ptr) __uval = (uval);                                \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
        __uaccess_begin();                                              \
        switch (size) {                                                 \
        case 1:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "q" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 2:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 4:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 8:                                                         \
        {                                                               \
                if (!IS_ENABLED(CONFIG_X86_64))                         \
                        __cmpxchg_wrong_size();                         \
                                                                        \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
        __uaccess_end();                                                \
        *__uval = __old;                                                \
        __ret;                                                          \
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)               \
({                                                                      \
        access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?                \
                __user_atomic_cmpxchg_inatomic((uval), (ptr),           \
                                (old), (new), sizeof(*(ptr))) :         \
                -EFAULT;                                                \
})
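
/*
 * Illustrative sketch: atomically replace a word in user space only if it
 * still holds the expected value, futex-style.  The caller must be able to
 * cope with -EFAULT (e.g. by running with pagefaults disabled and retrying).
 * The function name below is hypothetical.
 */
static inline int example_user_cmpxchg(unsigned int __user *uaddr,
                                       unsigned int expected, unsigned int newval)
{
        unsigned int cur;

        /* returns 0 or -EFAULT; on return, cur holds the value that was seen */
        return user_atomic_cmpxchg_inatomic(&cur, uaddr, expected, newval);
}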

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
        int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
                                           unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
                                         unsigned n);

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
# define copy_user_diag __compiletime_warning
#endif

extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
copy_to_user_overflow(void) __asm__("copy_from_user_overflow");

#undef copy_user_diag

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS

extern void
__compiletime_warning("copy_from_user() buffer size is not provably correct")
__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()

extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()

#else

static inline void
__copy_from_user_overflow(int size, unsigned long count)
{
        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

#define __copy_to_user_overflow __copy_from_user_overflow

#endif

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        int sz = __compiletime_object_size(to);

        might_fault();

        kasan_check_write(to, n);

        /*
         * While we would like to have the compiler do the checking for us
         * even in the non-constant size case, any false positives there are
         * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS is
         * enabled, but even without it - the [hopefully] dangerous-looking
         * nature of the warning would make people go look at the respective
         * call sites over and over again just to find that there's no
         * problem).
         *
         * And there are cases where it's just not realistic for the compiler
         * to prove the count to be in range. For example, when multiple call
         * sites of a helper function - perhaps in different source files -
         * all do proper range checking, yet the helper function doesn't
         * repeat it.
         *
         * Therefore limit the compile time checking to the constant size
         * case, and do only runtime checking for non-constant sizes.
         */

        if (likely(sz < 0 || sz >= n))
                n = _copy_from_user(to, from, n);
        else if (__builtin_constant_p(n))
                copy_from_user_overflow();
        else
                __copy_from_user_overflow(sz, n);

        return n;
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        int sz = __compiletime_object_size(from);

        kasan_check_read(from, n);

        might_fault();

        /* See the comment in copy_from_user() above. */
        if (likely(sz < 0 || sz >= n))
                n = _copy_to_user(to, from, n);
        else if (__builtin_constant_p(n))
                copy_to_user_overflow();
        else
                __copy_to_user_overflow(sz, n);

        return n;
}
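
/*
 * Illustrative sketch: whole structures move with copy_from_user() and
 * copy_to_user(); both return the number of bytes that could NOT be copied,
 * so 0 means success.  The struct and function names below are hypothetical.
 */
struct example_req {
        unsigned int cmd;
        unsigned int arg;
};

static inline int example_handle_req(struct example_req __user *ureq)
{
        struct example_req req;

        if (copy_from_user(&req, ureq, sizeof(req)))
                return -EFAULT;
        req.arg++;              /* hypothetical processing step */
        if (copy_to_user(ureq, &req, sizeof(req)))
                return -EFAULT;
        return 0;
}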

#undef __copy_from_user_overflow
#undef __copy_to_user_overflow

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()     __uaccess_begin()
#define user_access_end()       __uaccess_end()

#define unsafe_put_user(x, ptr)                                         \
({                                                                              \
        int __pu_err;                                                           \
        __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);         \
        __builtin_expect(__pu_err, 0);                                          \
})

#define unsafe_get_user(x, ptr)                                         \
({                                                                              \
        int __gu_err;                                                           \
        unsigned long __gu_val;                                                 \
        __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);    \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
        __builtin_expect(__gu_err, 0);                                          \
})
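
/*
 * Illustrative sketch: after one access_ok() check, a user_access_begin()/
 * user_access_end() section lets a loop of unsafe_put_user() calls run
 * without toggling SMAP around every single store.  The function name below
 * is hypothetical.
 */
static inline int example_fill_user_array(unsigned int __user *udst, unsigned int n)
{
        unsigned int i;

        if (!access_ok(VERIFY_WRITE, udst, n * sizeof(*udst)))
                return -EFAULT;

        user_access_begin();
        for (i = 0; i < n; i++) {
                if (unsafe_put_user(i, &udst[i])) {
                        user_access_end();
                        return -EFAULT;
                }
        }
        user_access_end();
        return 0;
}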

#endif /* _ASM_X86_UACCESS_H */