/* linux/drivers/misc/lkdtm/usercopy.c */
// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
  13
/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks by added "unconst" to all the const copies,
 * and making sure "cache_size" isn't optimized into a const.
 */
static volatile size_t unconst;
static volatile size_t cache_size = 1024;
/* Cache with a usercopy whitelist window; created in lkdtm_usercopy_init(). */
static struct kmem_cache *whitelist_cache;

/* Known pattern used to fill buffers for the "good" copies. */
static const unsigned char test_text[] = "This is a test.\n";
  26
  27/*
  28 * Instead of adding -Wno-return-local-addr, just pass the stack address
  29 * through a function to obfuscate it from the compiler.
  30 */
  31static noinline unsigned char *trick_compiler(unsigned char *stack)
  32{
  33        return stack + 0;
  34}
  35
/*
 * Build a stack frame below do_usercopy_stack()'s and leak its address.
 * The buffer is written via a loop so it must actually live on the
 * stack rather than in registers, and the address is laundered through
 * trick_compiler() so the compiler won't warn about (or optimize away)
 * returning the address of a local.  The caller deliberately uses the
 * resulting dead-frame pointer as a bad usercopy source/destination.
 */
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[32];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	return trick_compiler(buf);
}
  48
  49static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
  50{
  51        unsigned long user_addr;
  52        unsigned char good_stack[32];
  53        unsigned char *bad_stack;
  54        int i;
  55
  56        /* Exercise stack to avoid everything living in registers. */
  57        for (i = 0; i < sizeof(good_stack); i++)
  58                good_stack[i] = test_text[i % sizeof(test_text)];
  59
  60        /* This is a pointer to outside our current stack frame. */
  61        if (bad_frame) {
  62                bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
  63        } else {
  64                /* Put start address just inside stack. */
  65                bad_stack = task_stack_page(current) + THREAD_SIZE;
  66                bad_stack -= sizeof(unsigned long);
  67        }
  68
  69        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
  70                            PROT_READ | PROT_WRITE | PROT_EXEC,
  71                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
  72        if (user_addr >= TASK_SIZE) {
  73                pr_warn("Failed to allocate user memory\n");
  74                return;
  75        }
  76
  77        if (to_user) {
  78                pr_info("attempting good copy_to_user of local stack\n");
  79                if (copy_to_user((void __user *)user_addr, good_stack,
  80                                 unconst + sizeof(good_stack))) {
  81                        pr_warn("copy_to_user failed unexpectedly?!\n");
  82                        goto free_user;
  83                }
  84
  85                pr_info("attempting bad copy_to_user of distant stack\n");
  86                if (copy_to_user((void __user *)user_addr, bad_stack,
  87                                 unconst + sizeof(good_stack))) {
  88                        pr_warn("copy_to_user failed, but lacked Oops\n");
  89                        goto free_user;
  90                }
  91        } else {
  92                /*
  93                 * There isn't a safe way to not be protected by usercopy
  94                 * if we're going to write to another thread's stack.
  95                 */
  96                if (!bad_frame)
  97                        goto free_user;
  98
  99                pr_info("attempting good copy_from_user of local stack\n");
 100                if (copy_from_user(good_stack, (void __user *)user_addr,
 101                                   unconst + sizeof(good_stack))) {
 102                        pr_warn("copy_from_user failed unexpectedly?!\n");
 103                        goto free_user;
 104                }
 105
 106                pr_info("attempting bad copy_from_user of distant stack\n");
 107                if (copy_from_user(bad_stack, (void __user *)user_addr,
 108                                   unconst + sizeof(good_stack))) {
 109                        pr_warn("copy_from_user failed, but lacked Oops\n");
 110                        goto free_user;
 111                }
 112        }
 113
 114free_user:
 115        vm_munmap(user_addr, PAGE_SIZE);
 116}
 117
 118/*
 119 * This checks for whole-object size validation with hardened usercopy,
 120 * with or without usercopy whitelisting.
 121 */
 122static void do_usercopy_heap_size(bool to_user)
 123{
 124        unsigned long user_addr;
 125        unsigned char *one, *two;
 126        void __user *test_user_addr;
 127        void *test_kern_addr;
 128        size_t size = unconst + 1024;
 129
 130        one = kmalloc(size, GFP_KERNEL);
 131        two = kmalloc(size, GFP_KERNEL);
 132        if (!one || !two) {
 133                pr_warn("Failed to allocate kernel memory\n");
 134                goto free_kernel;
 135        }
 136
 137        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
 138                            PROT_READ | PROT_WRITE | PROT_EXEC,
 139                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
 140        if (user_addr >= TASK_SIZE) {
 141                pr_warn("Failed to allocate user memory\n");
 142                goto free_kernel;
 143        }
 144
 145        memset(one, 'A', size);
 146        memset(two, 'B', size);
 147
 148        test_user_addr = (void __user *)(user_addr + 16);
 149        test_kern_addr = one + 16;
 150
 151        if (to_user) {
 152                pr_info("attempting good copy_to_user of correct size\n");
 153                if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
 154                        pr_warn("copy_to_user failed unexpectedly?!\n");
 155                        goto free_user;
 156                }
 157
 158                pr_info("attempting bad copy_to_user of too large size\n");
 159                if (copy_to_user(test_user_addr, test_kern_addr, size)) {
 160                        pr_warn("copy_to_user failed, but lacked Oops\n");
 161                        goto free_user;
 162                }
 163        } else {
 164                pr_info("attempting good copy_from_user of correct size\n");
 165                if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
 166                        pr_warn("copy_from_user failed unexpectedly?!\n");
 167                        goto free_user;
 168                }
 169
 170                pr_info("attempting bad copy_from_user of too large size\n");
 171                if (copy_from_user(test_kern_addr, test_user_addr, size)) {
 172                        pr_warn("copy_from_user failed, but lacked Oops\n");
 173                        goto free_user;
 174                }
 175        }
 176        pr_err("FAIL: bad usercopy not detected!\n");
 177        pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
 178
 179free_user:
 180        vm_munmap(user_addr, PAGE_SIZE);
 181free_kernel:
 182        kfree(one);
 183        kfree(two);
 184}
 185
/*
 * This checks for the specific whitelist window within an object. If this
 * test passes, then do_usercopy_heap_size() tests will pass too.
 *
 * The window (offset cache_size/4, length cache_size/16) matches the
 * useroffset/usersize the cache was created with in lkdtm_usercopy_init().
 * A copy starting one byte before the window should be rejected.
 */
static void do_usercopy_heap_whitelist(bool to_user)
{
	unsigned long user_alloc;
	unsigned char *buf = NULL;
	unsigned char __user *user_addr;
	size_t offset, size;

	/* Make sure cache was prepared (init may have failed). */
	if (!whitelist_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate a buffer with a whitelisted window in the buffer.
	 */
	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
	if (!buf) {
		pr_warn("Failed to allocate buffer from whitelist cache\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_alloc >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}
	user_addr = (void __user *)user_alloc;

	memset(buf, 'B', cache_size);

	/* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
	offset = (cache_size / 4) + unconst;
	size = (cache_size / 16) + unconst;

	if (to_user) {
		pr_info("attempting good copy_to_user inside whitelist\n");
		if (copy_to_user(user_addr, buf + offset, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		/* Start one byte before the window: must trip the check. */
		pr_info("attempting bad copy_to_user outside whitelist\n");
		if (copy_to_user(user_addr, buf + offset - 1, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user inside whitelist\n");
		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		/* Start one byte before the window: must trip the check. */
		pr_info("attempting bad copy_from_user outside whitelist\n");
		if (copy_from_user(buf + offset - 1, user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}
	/* Reaching here means the out-of-window copy was not caught. */
	pr_err("FAIL: bad usercopy not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
	/* buf is NULL when kmem_cache_alloc() failed above. */
	if (buf)
		kmem_cache_free(whitelist_cache, buf);
}
 262
/* Callable tests. */

/* Copy to userspace with a length larger than the whole heap object. */
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
	do_usercopy_heap_size(true);
}
 268
/* Copy from userspace with a length larger than the whole heap object. */
void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
	do_usercopy_heap_size(false);
}
 273
/* Copy to userspace starting outside the object's whitelist window. */
void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
{
	do_usercopy_heap_whitelist(true);
}
 278
/* Copy from userspace into memory outside the whitelist window. */
void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
{
	do_usercopy_heap_whitelist(false);
}
 283
/* Copy to userspace from a dead (callee) stack frame. */
void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}
 288
/* Copy from userspace into a dead (callee) stack frame. */
void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}
 293
/* Copy to userspace from a span that runs off the end of the stack. */
void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}
 298
/*
 * Check that hardened usercopy rejects copies sourced from kernel text:
 * copying from rodata (test_text) should succeed, while copying a
 * PAGE_SIZE span of kernel text (vm_mmap's body) should Oops.
 */
void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata: %px\n",
		test_text);
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user from kernel text: %px\n",
		vm_mmap);
	/*
	 * NOTE(review): function_nocfi() presumably resolves the actual
	 * function body rather than a CFI jump-table entry — confirm
	 * against the arch's CFI implementation.
	 */
	if (copy_to_user((void __user *)user_addr, function_nocfi(vm_mmap),
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}
	pr_err("FAIL: bad copy_to_user() not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}
 332
/*
 * Create the cache used by the whitelist tests.  The usercopy window
 * (useroffset/usersize arguments of kmem_cache_create_usercopy())
 * covers cache_size/16 bytes starting at offset cache_size/4 of each
 * object, matching the offsets used in do_usercopy_heap_whitelist().
 */
void __init lkdtm_usercopy_init(void)
{
	/* Prepare cache with a restricted usercopy whitelist window. */
	whitelist_cache =
		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
					   0, 0,
					   cache_size / 4,
					   cache_size / 16,
					   NULL);
}
 343
/* Tear down the whitelist cache created in lkdtm_usercopy_init(). */
void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(whitelist_cache);
}
 348