linux/arch/x86/include/asm/uaccess_64.h
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <linux/lockdep.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

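/*
 * Usage sketch (illustrative, not part of this header): the checked
 * copy_to_user()/copy_from_user() entry points return the number of
 * bytes that could *not* be copied, so zero means success.  The struct
 * and ioctl handler below are hypothetical:
 *
 *      struct my_args {
 *              u32 flags;
 *              u64 addr;
 *      };
 *
 *      static long my_ioctl(void __user *uarg)
 *      {
 *              struct my_args args;
 *
 *              if (copy_from_user(&args, uarg, sizeof(args)))
 *                      return -EFAULT;
 *              args.flags |= 1;
 *              if (copy_to_user(uarg, &args, sizeof(args)))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */
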
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;

        /* Variable sizes go through the out-of-line string copy. */
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        /* Constant sizes are inlined as one or two moves. */
        switch (size) {
        case 1:
                __get_user_asm(*(u8 *)dst, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                return ret;
        case 2:
                __get_user_asm(*(u16 *)dst, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                return ret;
        case 4:
                __get_user_asm(*(u32 *)dst, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                return ret;
        case 8:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                return ret;
        case 10:
                /* 10- and 16-byte copies: an 8-byte move plus a tail. */
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u16 *)(8 + (char *)dst),
                               (u16 __user *)(8 + (char __user *)src),
                               ret, "w", "w", "=r", 2);
                return ret;
        case 16:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u64 *)(8 + (char *)dst),
                               (u64 __user *)(8 + (char __user *)src),
                               ret, "q", "", "=r", 8);
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}
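
/*
 * Usage sketch (illustrative): the double-underscore variants skip the
 * access_ok() check, so the caller must validate the range itself.  The
 * helper below is hypothetical:
 *
 *      static int my_read_word(const u64 __user *uptr, u64 *out)
 *      {
 *              if (!access_ok(VERIFY_READ, uptr, sizeof(*out)))
 *                      return -EFAULT;
 *              return __copy_from_user(out, uptr, sizeof(*out)) ?
 *                      -EFAULT : 0;
 *      }
 */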

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;

        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:
                __put_user_asm(*(u8 *)src, (u8 __user *)dst,
                               ret, "b", "b", "iq", 1);
                return ret;
        case 2:
                __put_user_asm(*(u16 *)src, (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 4:
                __put_user_asm(*(u32 *)src, (u32 __user *)dst,
                               ret, "l", "k", "ir", 4);
                return ret;
        case 8:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 8);
                return ret;
        case 10:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 10);
                if (unlikely(ret))
                        return ret;
                /* Compiler barrier: keep the tail store after the check. */
                asm("":::"memory");
                __put_user_asm(((u16 *)src)[4], (u16 __user *)dst + 4,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 16:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 16);
                if (unlikely(ret))
                        return ret;
                /* Compiler barrier: keep the tail store after the check. */
                asm("":::"memory");
                __put_user_asm(((u64 *)src)[1], (u64 __user *)dst + 1,
                               ret, "q", "", "ir", 8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}
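
/*
 * Usage sketch (illustrative): because the size is tested with
 * __builtin_constant_p(), passing sizeof() lets the whole copy collapse
 * into a single inlined mov.  Hypothetical caller:
 *
 *      static int my_publish_counter(u64 __user *uptr, u64 val)
 *      {
 *              if (!access_ok(VERIFY_WRITE, uptr, sizeof(val)))
 *                      return -EFAULT;
 *              return __copy_to_user(uptr, &val, sizeof(val)) ?
 *                      -EFAULT : 0;
 *      }
 */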

/* Copy between two user-space buffers; no access_ok on either side. */
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;

        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        /* Constant sizes bounce through a register-resident temporary. */
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                return ret;
        }
        case 4: {
                u32 tmp;
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "ir", 8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        }
}
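
/*
 * Usage sketch (illustrative): __copy_in_user() moves data between two
 * user-space buffers without a kernel bounce buffer.  Hypothetical
 * helper; both ranges must be checked by the caller:
 *
 *      static long my_user_memcpy(void __user *udst,
 *                                 const void __user *usrc, unsigned len)
 *      {
 *              if (!access_ok(VERIFY_WRITE, udst, len) ||
 *                  !access_ok(VERIFY_READ, usrc, len))
 *                      return -EFAULT;
 *              return __copy_in_user(udst, usrc, len) ? -EFAULT : 0;
 *      }
 */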

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

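/*
 * Usage sketch (illustrative): strncpy_from_user() returns the string
 * length on success, the full count if no NUL was found within it, or
 * -EFAULT on a bad address.  "buf" is assumed to hold PATH_MAX bytes:
 *
 *      static long my_fetch_name(char *buf, const char __user *uname)
 *      {
 *              long len = strncpy_from_user(buf, uname, PATH_MAX);
 *
 *              if (len < 0)
 *                      return len;
 *              if (len == PATH_MAX)
 *                      return -ENAMETOOLONG;
 *              return len;
 *      }
 */
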
__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
                                            unsigned size);

/* No access_ok and no might_sleep: usable with page faults disabled. */
static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return copy_user_generic((__force void *)dst, src, size);
}
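
/*
 * Usage sketch (illustrative): the _inatomic variants may run with page
 * faults disabled; a fault then makes the copy fail early instead of
 * sleeping, and the caller falls back to a sleeping copy:
 *
 *      pagefault_disable();
 *      left = __copy_to_user_inatomic(udst, kbuf, len);
 *      pagefault_enable();
 *      if (left)
 *              left = copy_to_user(udst, kbuf, len);
 */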

/*
 * Non-temporal copies: move data from user space without polluting the
 * CPU caches.  If @zerorest is set, the destination tail is zeroed when
 * the copy faults part-way through.
 */
extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
                                           unsigned size)
{
        might_sleep();
        return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
                                                    const void __user *src,
                                                    unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}
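
/*
 * Usage sketch (illustrative): the nocache variants suit large,
 * write-once transfers where the copied data will not be re-read soon,
 * e.g. filling a freshly allocated buffer:
 *
 *      if (__copy_from_user_nocache(kbuf, ubuf, len))
 *              return -EFAULT;
 */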

/* Fix up the tail of a partially failed copy; returns bytes not copied. */
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

#endif /* _ASM_X86_UACCESS_64_H */