linux/arch/arm64/mm/kasan_init.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

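/*
 * Shadow memory is needed only by the generic and software tag-based
 * KASAN modes; hardware tag-based KASAN (MTE) keeps tags in memory
 * itself, so everything below is compiled out in that configuration.
 */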
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

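/*
 * Temporary pgd: keeps the early shadow mapped while swapper_pg_dir is
 * torn down and repopulated with the real shadow (see kasan_init_shadow()).
 */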
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */

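/* Allocate one zeroed page of shadow from memblock; panic on failure. */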
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
        void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
                                              __pa(MAX_DMA_ADDRESS),
                                              MEMBLOCK_ALLOC_KASAN, node);
        if (!p)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE, node,
                      __pa(MAX_DMA_ADDRESS));

        return __pa(p);
}

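/*
 * As above, but skip the zeroing: the caller fills the page with
 * KASAN_SHADOW_INIT instead, so clearing it here would be wasted work.
 */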
static phys_addr_t __init kasan_alloc_raw_page(int node)
{
        void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
                                                __pa(MAX_DMA_ADDRESS),
                                                MEMBLOCK_ALLOC_KASAN, node);
        if (!p)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE, node,
                      __pa(MAX_DMA_ADDRESS));

        return __pa(p);
}

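/*
 * Return the pte for @addr, populating the pmd entry first if it is empty.
 * Early in boot the next-level table is the statically allocated
 * kasan_early_shadow_pte (hence __pa_symbol); later it is a freshly
 * allocated zeroed page. The pmd/pud variants below follow the same pattern.
 */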
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
                                      bool early)
{
        if (pmd_none(READ_ONCE(*pmdp))) {
                phys_addr_t pte_phys = early ?
                                __pa_symbol(kasan_early_shadow_pte)
                                        : kasan_alloc_zeroed_page(node);
                __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
        }

        return early ? pte_offset_kimg(pmdp, addr)
                     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
                                      bool early)
{
        if (pud_none(READ_ONCE(*pudp))) {
                phys_addr_t pmd_phys = early ?
                                __pa_symbol(kasan_early_shadow_pmd)
                                        : kasan_alloc_zeroed_page(node);
                __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
        }

        return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
                                      bool early)
{
        if (p4d_none(READ_ONCE(*p4dp))) {
                phys_addr_t pud_phys = early ?
                                __pa_symbol(kasan_early_shadow_pud)
                                        : kasan_alloc_zeroed_page(node);
                __p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE);
        }

        return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

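/*
 * The kasan_*_populate() walkers below build shadow page tables for
 * [addr, end). At the pte/pmd/pud levels the loop also stops as soon as
 * the next entry is already populated (the *_none() check in the loop
 * condition), so shadow that an earlier call mapped is not mapped twice.
 */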
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

        do {
                phys_addr_t page_phys = early ?
                                __pa_symbol(kasan_early_shadow_page)
                                        : kasan_alloc_raw_page(node);
                if (!early)
                        memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
                next = addr + PAGE_SIZE;
                set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
        } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

        do {
                next = pmd_addr_end(addr, end);
                kasan_pte_populate(pmdp, addr, next, node, early);
        } while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

        do {
                next = pud_addr_end(addr, end);
                kasan_pmd_populate(pudp, addr, next, node, early);
        } while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        p4d_t *p4dp = p4d_offset(pgdp, addr);

        do {
                next = p4d_addr_end(addr, end);
                kasan_pud_populate(p4dp, addr, next, node, early);
        } while (p4dp++, addr = next, addr != end);
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
                                      int node, bool early)
{
        unsigned long next;
        pgd_t *pgdp;

        pgdp = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                kasan_p4d_populate(pgdp, addr, next, node, early);
        } while (pgdp++, addr = next, addr != end);
}

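/*
 * The shadow for an address is computed as
 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET,
 * where KASAN_SHADOW_SCALE_SHIFT is 3 for generic KASAN (one shadow byte
 * covers 8 bytes of memory) and 4 for software tag-based KASAN (one byte
 * covers 16). The BUILD_BUG_ONs below sanity-check that layout.
 */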
/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
        BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
                KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
        BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
        kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
                           true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
                                      int node)
{
        kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
        pgd_t *pgdp, *pgdp_new, *pgdp_end;

        pgdp = pgd_offset_k(KASAN_SHADOW_START);
        pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
        pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
        do {
                set_pgd(pgdp_new, READ_ONCE(*pgdp));
        } while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}

static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        /*
         * Remove references to the kasan page tables from
         * swapper_pg_dir. pgd_clear() can't be used here
         * because it is a no-op on 2- and 3-level page
         * table setups.
         */
        for (; start < end; start += PGDIR_SIZE)
                set_pgd(pgd_offset_k(start), __pgd(0));
}

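/*
 * Replace the single-page early shadow with shadow that is properly
 * backed for the kernel image, the linear map and (depending on the
 * configuration) the module and vmalloc areas.
 */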
static void __init kasan_init_shadow(void)
{
        u64 kimg_shadow_start, kimg_shadow_end;
        u64 mod_shadow_start, mod_shadow_end;
        u64 vmalloc_shadow_end;
        phys_addr_t pa_start, pa_end;
        u64 i;

        kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
        kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));

        mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
        mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

        vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);

        /*
         * We are going to perform proper setup of shadow memory.
         * First we must unmap the early shadow (the clear_pgds() call
         * below), but instrumented code can't run without shadow memory,
         * so tmp_pg_dir is used to keep the early shadow mapped until
         * the full shadow setup is finished.
         */
        memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
        dsb(ishst);
        cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

        kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
                           early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));

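        /*
         * Back the shadow for the VA range between the end of the linear
         * map and the start of the module area with the single read-only
         * zero shadow page.
         */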
        kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
                                   (void *)mod_shadow_start);

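        /*
         * With CONFIG_KASAN_VMALLOC the shadow for the vmalloc area is
         * populated on demand when memory is vmapped, so only the tail
         * beyond VMALLOC_END needs the zero shadow here.
         */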
        if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
                BUILD_BUG_ON(VMALLOC_START != MODULES_END);
                kasan_populate_early_shadow((void *)vmalloc_shadow_end,
                                            (void *)KASAN_SHADOW_END);
        } else {
                kasan_populate_early_shadow((void *)kimg_shadow_end,
                                            (void *)KASAN_SHADOW_END);
                if (kimg_shadow_start > mod_shadow_end)
                        kasan_populate_early_shadow((void *)mod_shadow_end,
                                                    (void *)kimg_shadow_start);
        }

        for_each_mem_range(i, &pa_start, &pa_end) {
                void *start = (void *)__phys_to_virt(pa_start);
                void *end = (void *)__phys_to_virt(pa_end);

                if (start >= end)
                        break;

                kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
                                   (unsigned long)kasan_mem_to_shadow(end),
                                   early_pfn_to_nid(virt_to_pfn(start)));
        }

        /*
         * KAsan may reuse the contents of kasan_early_shadow_pte directly,
         * so we should make sure that it maps the zero page read-only.
         */
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
                        pfn_pte(sym_to_pfn(kasan_early_shadow_page),
                                PAGE_KERNEL_RO));

        memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
        cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
}

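/*
 * init_task starts with a nonzero kasan_depth so that reports are
 * suppressed during early boot; dropping it to 0 enables KASAN checking.
 */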
static void __init kasan_init_depth(void)
{
        init_task.kasan_depth = 0;
}

void __init kasan_init(void)
{
        kasan_init_shadow();
        kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
        /* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
        pr_info("KernelAddressSanitizer initialized\n");
#endif
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
