linux/arch/s390/mm/kasan_init.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uv.h>

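/*
 * Early boot allocator state. Both allocators hand out memory from the
 * top of their region downwards: segment_pos/segment_low bound the 1 MB
 * segments used to back shadow memory when EDAT is available, while
 * pgalloc_pos/pgalloc_low bound page-sized allocations for page tables
 * (and, without EDAT, for shadow pages as well).
 */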
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;

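/*
 * Shorthand for the shadow address of x. kasan_mem_to_shadow() computes
 * shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET,
 * i.e. one shadow byte tracks eight bytes of kernel address space.
 */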
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

static void __init kasan_early_panic(const char *reason)
{
        sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
        sclp_early_printk(reason);
        disabled_wait();
}

static void * __init kasan_early_alloc_segment(void)
{
        segment_pos -= _SEGMENT_SIZE;

        if (segment_pos < segment_low)
                kasan_early_panic("out of memory during initialisation\n");

        return (void *)segment_pos;
}

static void * __init kasan_early_alloc_pages(unsigned int order)
{
        pgalloc_pos -= (PAGE_SIZE << order);

        if (pgalloc_pos < pgalloc_low)
                kasan_early_panic("out of memory during initialisation\n");

        return (void *)pgalloc_pos;
}

static void * __init kasan_early_crst_alloc(unsigned long val)
{
        unsigned long *table;

        table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
        if (table)
                crst_table_init(table, val);
        return table;
}

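/*
 * On s390 a page table is 2 KB (_PAGE_TABLE_SIZE), half of a 4 KB page,
 * which the BUILD_BUG_ON below asserts. Each page allocation therefore
 * yields two page tables: the upper half is returned immediately and the
 * lower half is stashed in pte_leftover for the next call.
 */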
static pte_t * __init kasan_early_pte_alloc(void)
{
        static void *pte_leftover;
        pte_t *pte;

        BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

        if (!pte_leftover) {
                pte_leftover = kasan_early_alloc_pages(0);
                pte = pte_leftover + _PAGE_TABLE_SIZE;
        } else {
                pte = pte_leftover;
                pte_leftover = NULL;
        }
        memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
        return pte;
}

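/*
 * How a range gets populated:
 * POPULATE_ONE2ONE     - map virtual addresses 1:1 to physical memory
 * POPULATE_MAP         - back shadow memory with freshly allocated pages
 * POPULATE_ZERO_SHADOW - point shadow memory at the shared read-only
 *                        zero page/tables (ranges kasan will not track)
 * POPULATE_SHALLOW     - fill only the top-level (pgd) entries; lower
 *                        levels are populated on demand when the vmalloc
 *                        and modules areas are used
 */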
enum populate_mode {
        POPULATE_ONE2ONE,
        POPULATE_MAP,
        POPULATE_ZERO_SHADOW,
        POPULATE_SHALLOW
};
static void __init kasan_early_pgtable_populate(unsigned long address,
                                                unsigned long end,
                                                enum populate_mode mode)
{
        unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
        pgd_t *pg_dir;
        p4d_t *p4_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;

        pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
        if (!has_nx)
                pgt_prot_zero &= ~_PAGE_NOEXEC;
        pgt_prot = pgprot_val(PAGE_KERNEL);
        sgt_prot = pgprot_val(SEGMENT_KERNEL);
        if (!has_nx || mode == POPULATE_ONE2ONE) {
                pgt_prot &= ~_PAGE_NOEXEC;
                sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
        }

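        /*
         * Walk the range top-down. At each level, large aligned chunks of
         * a zero-shadow range are short-circuited by pointing the entry at
         * the shared kasan_early_shadow_* table one level below; everything
         * else gets real tables, segments or pages allocated for it.
         */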
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, PGDIR_SIZE) &&
                            end - address >= PGDIR_SIZE) {
                                pgd_populate(&init_mm, pg_dir,
                                                kasan_early_shadow_p4d);
                                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                                continue;
                        }
                        p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
                        pgd_populate(&init_mm, pg_dir, p4_dir);
                }

                if (mode == POPULATE_SHALLOW) {
                        address = (address + P4D_SIZE) & P4D_MASK;
                        continue;
                }

                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, P4D_SIZE) &&
                            end - address >= P4D_SIZE) {
                                p4d_populate(&init_mm, p4_dir,
                                                kasan_early_shadow_pud);
                                address = (address + P4D_SIZE) & P4D_MASK;
                                continue;
                        }
                        pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
                        p4d_populate(&init_mm, p4_dir, pu_dir);
                }

                pu_dir = pud_offset(p4_dir, address);
                if (pud_none(*pu_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, PUD_SIZE) &&
                            end - address >= PUD_SIZE) {
                                pud_populate(&init_mm, pu_dir,
                                                kasan_early_shadow_pmd);
                                address = (address + PUD_SIZE) & PUD_MASK;
                                continue;
                        }
                        pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                        pud_populate(&init_mm, pu_dir, pm_dir);
                }

                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, PMD_SIZE) &&
                            end - address >= PMD_SIZE) {
                                pmd_populate(&init_mm, pm_dir,
                                                kasan_early_shadow_pte);
                                address = (address + PMD_SIZE) & PMD_MASK;
                                continue;
                        }
                        /* the first megabyte of 1:1 is mapped with 4k pages */
                        if (has_edat && address && end - address >= PMD_SIZE &&
                            mode != POPULATE_ZERO_SHADOW) {
                                void *page;

                                if (mode == POPULATE_ONE2ONE) {
                                        page = (void *)address;
                                } else {
                                        page = kasan_early_alloc_segment();
                                        memset(page, 0, _SEGMENT_SIZE);
                                }
                                pmd_val(*pm_dir) = __pa(page) | sgt_prot;
                                address = (address + PMD_SIZE) & PMD_MASK;
                                continue;
                        }

                        pt_dir = kasan_early_pte_alloc();
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                } else if (pmd_large(*pm_dir)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                if (pte_none(*pt_dir)) {
                        void *page;

                        switch (mode) {
                        case POPULATE_ONE2ONE:
                                page = (void *)address;
                                pte_val(*pt_dir) = __pa(page) | pgt_prot;
                                break;
                        case POPULATE_MAP:
                                page = kasan_early_alloc_pages(0);
                                memset(page, 0, PAGE_SIZE);
                                pte_val(*pt_dir) = __pa(page) | pgt_prot;
                                break;
                        case POPULATE_ZERO_SHADOW:
                                page = kasan_early_shadow_page;
                                pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
                                break;
                        case POPULATE_SHALLOW:
                                /* should never happen */
                                break;
                        }
                }
                address += PAGE_SIZE;
        }
}

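/*
 * Install pgd as the active address space: on s390 the address-space
 * control element (ASCE) lives in control registers - CR1 (primary),
 * CR7 (secondary) and CR13 (home) - rather than in a single page table
 * base register.
 */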
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
        unsigned long asce_bits;

        asce_bits = asce_type | _ASCE_TABLE_LENGTH;
        S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
        S390_lowcore.user_asce = S390_lowcore.kernel_asce;

        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
}

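/*
 * Turn on dynamic address translation (DAT) by setting the DAT bit in
 * the current PSW mask and selecting the home address space, which the
 * kernel runs in.
 */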
static void __init kasan_enable_dat(void)
{
        psw_t psw;

        psw.mask = __extract_psw();
        psw_bits(psw).dat = 1;
        psw_bits(psw).as = PSW_BITS_AS_HOME;
        __load_psw_mask(psw.mask);
}

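/*
 * Facility 8 is EDAT1 (1 MB segment mappings), facility 130 is the
 * instruction-execution-protection facility backing _PAGE_NOEXEC; the
 * control register 0 bits set below enable them for this CPU.
 */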
static void __init kasan_early_detect_facilities(void)
{
        if (test_facility(8)) {
                has_edat = true;
                __ctl_set_bit(0, 23);
        }
        if (!noexec_disabled && test_facility(130)) {
                has_nx = true;
                __ctl_set_bit(0, 20);
        }
}

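/*
 * Runs very early in boot with DAT still off: build the early page
 * tables (identity mapping plus kasan shadow), switch to them, and only
 * then allow kasan instrumentation to run by resetting
 * init_task.kasan_depth at the end.
 */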
void __init kasan_early_init(void)
{
        unsigned long shadow_alloc_size;
        unsigned long initrd_end;
        unsigned long memsize;
        unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
        pte_t pte_z;
        pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
        pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
        p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

        kasan_early_detect_facilities();
        if (!has_nx)
                pgt_prot &= ~_PAGE_NOEXEC;
        pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

        memsize = get_mem_detect_end();
        if (!memsize)
                kasan_early_panic("cannot detect physical memory size\n");
        /*
         * Kasan currently supports standby memory, but only if it follows
         * online memory (the default allocation), i.e. there are no memory
         * holes.
         * - memsize represents the end of online memory
         * - ident_map_size represents online + standby memory, with memory
         *   limits taken into account.
         * Kasan maps "memsize" right away:
         * [0, memsize]                 - as identity mapping
         * [__sha(0), __sha(memsize)]   - shadow memory for identity mapping
         * The remainder [memsize, ident_map_size], if memsize is smaller than
         * ident_map_size, can be mapped/unmapped dynamically later during
         * memory hotplug.
         */
        memsize = min(memsize, ident_map_size);

        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
        crst_table_init((unsigned long *)early_pg_dir, _REGION2_ENTRY_EMPTY);

        /* init kasan zero shadow */
        crst_table_init((unsigned long *)kasan_early_shadow_p4d,
                                p4d_val(p4d_z));
        crst_table_init((unsigned long *)kasan_early_shadow_pud,
                                pud_val(pud_z));
        crst_table_init((unsigned long *)kasan_early_shadow_pmd,
                                pmd_val(pmd_z));
        memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

        shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
        pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
                initrd_end =
                    round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
                pgalloc_low = max(pgalloc_low, initrd_end);
        }

        if (pgalloc_low + shadow_alloc_size > memsize)
                kasan_early_panic("out of memory during initialisation\n");

        if (has_edat) {
                segment_pos = round_down(memsize, _SEGMENT_SIZE);
                segment_low = segment_pos - shadow_alloc_size;
                pgalloc_pos = segment_low;
        } else {
                pgalloc_pos = memsize;
        }
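        /*
         * Allocator layout at this point: with EDAT, 1 MB shadow segments
         * are carved from the top of online memory downwards and page
         * tables from below segment_low; without EDAT everything comes
         * from the top of memory as 4 KB pages. pgalloc_low fences off
         * the kernel image and initrd from being overwritten.
         */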
        init_mm.pgd = early_pg_dir;
        /*
         * Current memory layout:
         * +- 0 -------------+     +- shadow start -+
         * | 1:1 ram mapping |    /| 1/8 ram        |
         * |                 |   / |                |
         * +- end of ram ----+  /  +----------------+
         * | ... gap ...     | /   |                |
         * |                 |/    |    kasan       |
         * +- shadow start --+     |    zero        |
         * | 1/8 addr space  |     |    page        |
         * +- shadow end    -+     |    mapping     |
         * | ... gap ...     |\    |  (untracked)   |
         * +- vmalloc area  -+ \   |                |
         * | vmalloc_size    |  \  |                |
         * +- modules vaddr -+   \ +----------------+
         * | 2Gb             |    \|      unmapped  | allocated per module
         * +-----------------+     +- shadow end ---+
         *
         * Current memory layout (KASAN_VMALLOC):
         * +- 0 -------------+     +- shadow start -+
         * | 1:1 ram mapping |    /| 1/8 ram        |
         * |                 |   / |                |
         * +- end of ram ----+  /  +----------------+
         * | ... gap ...     | /   |    kasan       |
         * |                 |/    |    zero        |
         * +- shadow start --+     |    page        |
         * | 1/8 addr space  |     |    mapping     |
         * +- shadow end    -+     |  (untracked)   |
         * | ... gap ...     |\    |                |
         * +- vmalloc area  -+ \   +- vmalloc area -+
         * | vmalloc_size    |  \  |shallow populate|
         * +- modules vaddr -+   \ +- modules area -+
         * | 2Gb             |    \|shallow populate|
         * +-----------------+     +- shadow end ---+
         */
        /* populate kasan shadow (for identity mapping and zero page mapping) */
        kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP);
        if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
                /* shallowly populate kasan shadow for vmalloc and modules */
                kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
                                             POPULATE_SHALLOW);
        }
        /* populate kasan shadow for untracked memory */
        kasan_early_pgtable_populate(__sha(ident_map_size),
                                     IS_ENABLED(CONFIG_KASAN_VMALLOC) ?
                                                   __sha(VMALLOC_START) :
                                                   __sha(MODULES_VADDR),
                                     POPULATE_ZERO_SHADOW);
        kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
                                     POPULATE_ZERO_SHADOW);
        /* memory allocated for identity mapping structs will be freed later */
        pgalloc_freeable = pgalloc_pos;
        /* populate identity mapping */
        kasan_early_pgtable_populate(0, memsize, POPULATE_ONE2ONE);
        kasan_set_pgd(early_pg_dir, _ASCE_TYPE_REGION2);
        kasan_enable_dat();
        /* enable kasan */
        init_task.kasan_depth = 0;
        memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
        sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow_mapping(void)
{
        /*
         * At this point we are still running on the early page tables set
         * up in early_pg_dir, while swapper_pg_dir has just been initialized
         * with the identity mapping. Carry over the shadow memory region
         * from early_pg_dir to swapper_pg_dir.
         */

        pgd_t *pg_dir_src;
        pgd_t *pg_dir_dst;
        p4d_t *p4_dir_src;
        p4d_t *p4_dir_dst;

        pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
        pg_dir_dst = pgd_offset_raw(init_mm.pgd, KASAN_SHADOW_START);
        p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
        p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
        memcpy(p4_dir_dst, p4_dir_src,
               (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
}

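/*
 * Once swapper_pg_dir provides the final identity mapping, the page
 * tables kasan built for its temporary identity mapping (everything
 * from pgalloc_pos up to pgalloc_freeable) can be given back.
 */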
void __init kasan_free_early_identity(void)
{
        memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}