// SPDX-License-Identifier: GPL-2.0
/*
 *      fs/proc/kcore.c kernel ELF core dumper
 *
 *      Modelled on fs/exec.c:aout_core_dump()
 *      Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *      ELF version written by David Howells <David.Howells@nexor.co.uk>
 *      Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *      Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *      Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/crash_core.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/notifier.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef kc_offset_to_vaddr
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif
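
/*
 * These default macros translate between a kernel virtual address and its
 * offset within the dumped data. As an illustrative sketch (assuming the
 * defaults above and the data_offset computed by get_kcore_size() below),
 * the file position of a direct-mapped address v is:
 *
 *      loff_t pos = data_offset + kc_vaddr_to_offset(v);
 *      unsigned long v2 = kc_offset_to_vaddr(pos - data_offset);  (v2 == v)
 */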

static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * Same as oldmem_pfn_is_ram in vmcore
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (mem_pfn_is_ram)
                return -EBUSY;
        mem_pfn_is_ram = fn;
        return 0;
}

static int pfn_is_ram(unsigned long pfn)
{
        if (mem_pfn_is_ram)
                return mem_pfn_is_ram(pfn);
        else
                return 1;
}
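
/*
 * Illustrative sketch only: an architecture where some pfns are not backed
 * by RAM would register its hook at init time roughly as below.
 * my_pfn_is_ram() and my_arch_pfn_backed() are hypothetical names, not part
 * of this file:
 *
 *      static int my_pfn_is_ram(unsigned long pfn)
 *      {
 *              return my_arch_pfn_backed(pfn) ? 1 : 0;
 *      }
 *
 *      register_mem_pfn_is_ram(my_pfn_is_ram);
 *
 * A second registration fails with -EBUSY, as enforced above.
 */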

/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
                       int type)
{
        new->addr = (unsigned long)addr;
        new->size = size;
        new->type = type;

        list_add_tail(&new->list, &kclist_head);
}

static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
                             size_t *data_offset)
{
        size_t try, size;
        struct kcore_list *m;

        *nphdr = 1; /* PT_NOTE */
        size = 0;

        list_for_each_entry(m, &kclist_head, list) {
                try = kc_vaddr_to_offset((size_t)m->addr + m->size);
                if (try > size)
                        size = try;
                *nphdr = *nphdr + 1;
        }

        *phdrs_len = *nphdr * sizeof(struct elf_phdr);
        *notes_len = (4 * sizeof(struct elf_note) +
                      3 * ALIGN(sizeof(CORE_STR), 4) +
                      VMCOREINFO_NOTE_NAME_BYTES +
                      ALIGN(sizeof(struct elf_prstatus), 4) +
                      ALIGN(sizeof(struct elf_prpsinfo), 4) +
                      ALIGN(arch_task_struct_size, 4) +
                      ALIGN(vmcoreinfo_size, 4));
        *data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
                                  *notes_len);
        return *data_offset + size;
}
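
/*
 * The resulting /proc/kcore layout, as computed above:
 *
 *      offset 0:       struct elfhdr
 *      phdrs_offset:   nphdr program headers (one PT_NOTE plus one PT_LOAD
 *                      per kclist entry)
 *      notes_offset:   the ELF note segment
 *      data_offset:    page-aligned start of the dumped memory; a virtual
 *                      address v lands at data_offset + kc_vaddr_to_offset(v)
 */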

#ifdef CONFIG_HIGHMEM
/*
 * With HIGHMEM, part of memory is not directly mapped and is therefore
 * invisible to the kernel, so only the continuous low-memory range
 * [0...max_low_pfn) is dumped here.
 */
static int kcore_ram_list(struct list_head *head)
{
        struct kcore_list *ent;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)__va(0);
        ent->size = max_low_pfn << PAGE_SHIFT;
        ent->type = KCORE_RAM;
        list_add(&ent->list, head);
        return 0;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate the vmemmap address range for a given System RAM pfn range and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
        unsigned long nr_pages = ent->size >> PAGE_SHIFT;
        unsigned long start, end;
        struct kcore_list *vmm, *tmp;

        start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
        end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
        end = PAGE_ALIGN(end);
        /* overlap check (because we have to align pages) */
        list_for_each_entry(tmp, head, list) {
                if (tmp->type != KCORE_VMEMMAP)
                        continue;
                if (start < tmp->addr + tmp->size)
                        if (end > tmp->addr)
                                end = tmp->addr;
        }
        if (start < end) {
                vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
                if (!vmm)
                        return 0;
                vmm->addr = start;
                vmm->size = end - start;
                vmm->type = KCORE_VMEMMAP;
                list_add_tail(&vmm->list, head);
        }
        return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        return 1;
}

#endif

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        struct list_head *head = (struct list_head *)arg;
        struct kcore_list *ent;
        struct page *p;

        if (!pfn_valid(pfn))
                return 1;

        p = pfn_to_page(pfn);

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)page_to_virt(p);
        ent->size = nr_pages << PAGE_SHIFT;

        if (!virt_addr_valid(ent->addr))
                goto free_out;
        /* Trim so the area does not wrap past the end of the address space (from ppc-32 code). */
        if (ULONG_MAX - ent->addr < ent->size)
                ent->size = ULONG_MAX - ent->addr;

        /*
         * We've already checked virt_addr_valid so we know this address
         * is a valid pointer, therefore we can check against it to determine
         * if we need to trim
         */
        if (VMALLOC_START > ent->addr) {
                if (VMALLOC_START - ent->addr < ent->size)
                        ent->size = VMALLOC_START - ent->addr;
        }

        ent->type = KCORE_RAM;
        list_add_tail(&ent->list, head);

        if (!get_sparsemem_vmemmap_info(ent, head)) {
                list_del(&ent->list);
                goto free_out;
        }

        return 0;
free_out:
        kfree(ent);
        return 1;
}

static int kcore_ram_list(struct list_head *list)
{
        int nid, ret;
        unsigned long end_pfn;

        /* Not initialized yet: find the highest pfn and update now. */
        end_pfn = 0;
        for_each_node_state(nid, N_MEMORY) {
                unsigned long node_end;
                node_end = node_end_pfn(nid);
                if (end_pfn < node_end)
                        end_pfn = node_end;
        }
        /* scan 0 to max_pfn */
        ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
        if (ret)
                return -ENOMEM;
        return 0;
}
#endif /* CONFIG_HIGHMEM */

static int kcore_update_ram(void)
{
        LIST_HEAD(list);
        LIST_HEAD(garbage);
        int nphdr;
        size_t phdrs_len, notes_len, data_offset;
        struct kcore_list *tmp, *pos;
        int ret = 0;

        down_write(&kclist_lock);
        if (!xchg(&kcore_need_update, 0))
                goto out;

        ret = kcore_ram_list(&list);
        if (ret) {
                /* Couldn't get the RAM list, try again next time. */
                WRITE_ONCE(kcore_need_update, 1);
                list_splice_tail(&list, &garbage);
                goto out;
        }

        list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
                if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
                        list_move(&pos->list, &garbage);
        }
        list_splice_tail(&list, &kclist_head);

        proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
                                               &data_offset);

out:
        up_write(&kclist_lock);
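        /*
         * Free the replaced entries only after dropping kclist_lock, keeping
         * the write-side critical section short.
         */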
        list_for_each_entry_safe(pos, tmp, &garbage, list) {
                list_del(&pos->list);
                kfree(pos);
        }
        return ret;
}

static void append_kcore_note(char *notes, size_t *i, const char *name,
                              unsigned int type, const void *desc,
                              size_t descsz)
{
        struct elf_note *note = (struct elf_note *)&notes[*i];

        note->n_namesz = strlen(name) + 1;
        note->n_descsz = descsz;
        note->n_type = type;
        *i += sizeof(*note);
        memcpy(&notes[*i], name, note->n_namesz);
        *i = ALIGN(*i + note->n_namesz, 4);
        memcpy(&notes[*i], desc, descsz);
        *i = ALIGN(*i + descsz, 4);
}
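
/*
 * Each note produced above has the standard ELF note layout: the elf_note
 * header, then the NUL-terminated name padded to a 4-byte boundary, then
 * the descriptor padded likewise. E.g. the NT_PRSTATUS note emitted by
 * read_kcore() ends up as:
 *
 *      n_namesz = 5, n_descsz = sizeof(struct elf_prstatus), n_type = NT_PRSTATUS
 *      "CORE\0" plus 3 bytes of padding
 *      prstatus descriptor, padded to a 4-byte boundary
 */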

static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
        char *buf = file->private_data;
        size_t phdrs_offset, notes_offset, data_offset;
        size_t phdrs_len, notes_len;
        struct kcore_list *m;
        size_t tsz;
        int nphdr;
        unsigned long start;
        size_t orig_buflen = buflen;
        int ret = 0;

        down_read(&kclist_lock);

        get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
        phdrs_offset = sizeof(struct elfhdr);
        notes_offset = phdrs_offset + phdrs_len;

        /* ELF file header. */
        if (buflen && *fpos < sizeof(struct elfhdr)) {
                struct elfhdr ehdr = {
                        .e_ident = {
                                [EI_MAG0] = ELFMAG0,
                                [EI_MAG1] = ELFMAG1,
                                [EI_MAG2] = ELFMAG2,
                                [EI_MAG3] = ELFMAG3,
                                [EI_CLASS] = ELF_CLASS,
                                [EI_DATA] = ELF_DATA,
                                [EI_VERSION] = EV_CURRENT,
                                [EI_OSABI] = ELF_OSABI,
                        },
                        .e_type = ET_CORE,
                        .e_machine = ELF_ARCH,
                        .e_version = EV_CURRENT,
                        .e_phoff = sizeof(struct elfhdr),
                        .e_flags = ELF_CORE_EFLAGS,
                        .e_ehsize = sizeof(struct elfhdr),
                        .e_phentsize = sizeof(struct elf_phdr),
                        .e_phnum = nphdr,
                };

                tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
                if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) {
                        ret = -EFAULT;
                        goto out;
                }

                buffer += tsz;
                buflen -= tsz;
                *fpos += tsz;
        }

        /* ELF program headers. */
        if (buflen && *fpos < phdrs_offset + phdrs_len) {
                struct elf_phdr *phdrs, *phdr;

                phdrs = kzalloc(phdrs_len, GFP_KERNEL);
                if (!phdrs) {
                        ret = -ENOMEM;
                        goto out;
                }

                phdrs[0].p_type = PT_NOTE;
                phdrs[0].p_offset = notes_offset;
                phdrs[0].p_filesz = notes_len;

                phdr = &phdrs[1];
                list_for_each_entry(m, &kclist_head, list) {
                        phdr->p_type = PT_LOAD;
                        phdr->p_flags = PF_R | PF_W | PF_X;
                        phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
                        if (m->type == KCORE_REMAP)
                                phdr->p_vaddr = (size_t)m->vaddr;
                        else
                                phdr->p_vaddr = (size_t)m->addr;
                        if (m->type == KCORE_RAM || m->type == KCORE_REMAP)
                                phdr->p_paddr = __pa(m->addr);
                        else if (m->type == KCORE_TEXT)
                                phdr->p_paddr = __pa_symbol(m->addr);
                        else
                                phdr->p_paddr = (elf_addr_t)-1;
                        phdr->p_filesz = phdr->p_memsz = m->size;
                        phdr->p_align = PAGE_SIZE;
                        phdr++;
                }

                tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
                if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset,
                                 tsz)) {
                        kfree(phdrs);
                        ret = -EFAULT;
                        goto out;
                }
                kfree(phdrs);

                buffer += tsz;
                buflen -= tsz;
                *fpos += tsz;
        }

        /* ELF note segment. */
        if (buflen && *fpos < notes_offset + notes_len) {
                struct elf_prstatus prstatus = {};
                struct elf_prpsinfo prpsinfo = {
                        .pr_sname = 'R',
                        .pr_fname = "vmlinux",
                };
                char *notes;
                size_t i = 0;

                strlcpy(prpsinfo.pr_psargs, saved_command_line,
                        sizeof(prpsinfo.pr_psargs));

                notes = kzalloc(notes_len, GFP_KERNEL);
                if (!notes) {
                        ret = -ENOMEM;
                        goto out;
                }

                append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
                                  sizeof(prstatus));
                append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
                                  sizeof(prpsinfo));
                append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
                                  arch_task_struct_size);
                /*
                 * vmcoreinfo_size is mostly constant after init time, but it
                 * can be changed by crash_save_vmcoreinfo(). Racing here with a
                 * panic on another CPU before the machine goes down is insanely
                 * unlikely, but it's better to not leave potential buffer
                 * overflows lying around, regardless.
                 */
                append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
                                  vmcoreinfo_data,
                                  min(vmcoreinfo_size, notes_len - i));

                tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
                if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) {
                        kfree(notes);
                        ret = -EFAULT;
                        goto out;
                }
                kfree(notes);

                buffer += tsz;
                buflen -= tsz;
                *fpos += tsz;
        }

        /*
         * Check to see if our file offset matches with any of
         * the addresses in the elf_phdr on our list.
         */
        start = kc_offset_to_vaddr(*fpos - data_offset);
        if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
                tsz = buflen;

        m = NULL;
        while (buflen) {
                /*
                 * If this is the first iteration or the address is not within
                 * the previous entry, search for a matching entry.
                 */
                if (!m || start < m->addr || start >= m->addr + m->size) {
                        list_for_each_entry(m, &kclist_head, list) {
                                if (start >= m->addr &&
                                    start < m->addr + m->size)
                                        break;
                        }
                }

                if (&m->list == &kclist_head) {
                        if (clear_user(buffer, tsz)) {
                                ret = -EFAULT;
                                goto out;
                        }
                        m = NULL;       /* skip the list anchor */
                } else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
                        if (clear_user(buffer, tsz)) {
                                ret = -EFAULT;
                                goto out;
                        }
                } else if (m->type == KCORE_VMALLOC) {
                        vread(buf, (char *)start, tsz);
                        /* we have to zero-fill user buffer even if no read */
                        if (copy_to_user(buffer, buf, tsz)) {
                                ret = -EFAULT;
                                goto out;
                        }
                } else if (m->type == KCORE_USER) {
                        /* User page is handled prior to normal kernel page: */
                        if (copy_to_user(buffer, (char *)start, tsz)) {
                                ret = -EFAULT;
                                goto out;
                        }
                } else {
                        if (kern_addr_valid(start)) {
                                /*
                                 * Using bounce buffer to bypass the
                                 * hardened user copy kernel text checks.
                                 */
                                if (copy_from_kernel_nofault(buf, (void *)start,
                                                tsz)) {
                                        if (clear_user(buffer, tsz)) {
                                                ret = -EFAULT;
                                                goto out;
                                        }
                                } else {
                                        if (copy_to_user(buffer, buf, tsz)) {
                                                ret = -EFAULT;
                                                goto out;
                                        }
                                }
                        } else {
                                if (clear_user(buffer, tsz)) {
                                        ret = -EFAULT;
                                        goto out;
                                }
                        }
                }
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                start += tsz;
                tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
        }

out:
        up_read(&kclist_lock);
        if (ret)
                return ret;
        return orig_buflen - buflen;
}
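
/*
 * /proc/kcore is consumed as an ordinary ELF core file; typical (root-only)
 * usage, for illustration:
 *
 *      readelf -l /proc/kcore          (list the PT_NOTE/PT_LOAD segments)
 *      gdb vmlinux /proc/kcore         (inspect live kernel memory)
 */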

static int open_kcore(struct inode *inode, struct file *filp)
{
        int ret = security_locked_down(LOCKDOWN_KCORE);

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        if (ret)
                return ret;

        filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!filp->private_data)
                return -ENOMEM;

        if (kcore_need_update)
                kcore_update_ram();
        if (i_size_read(inode) != proc_root_kcore->size) {
                inode_lock(inode);
                i_size_write(inode, proc_root_kcore->size);
                inode_unlock(inode);
        }
        return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}

static const struct proc_ops kcore_proc_ops = {
        .proc_read      = read_kcore,
        .proc_open      = open_kcore,
        .proc_release   = release_kcore,
        .proc_lseek     = default_llseek,
};

/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
                                    unsigned long action, void *arg)
{
        switch (action) {
        case MEM_ONLINE:
        case MEM_OFFLINE:
                kcore_need_update = 1;
                break;
        }
        return NOTIFY_OK;
}
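
/*
 * The rebuild itself is deferred: the next open_kcore() sees
 * kcore_need_update set and calls kcore_update_ram().
 */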

static struct notifier_block kcore_callback_nb __meminitdata = {
        .notifier_call = kcore_callback,
        .priority = 0,
};

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If CONFIG_ARCH_PROC_KCORE_TEXT is defined, the kernel text is mapped
 * through a special segment rather than the direct-map area, so a
 * dedicated TEXT entry must be created.
 */
static void __init proc_kcore_text_init(void)
{
        kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * The modules range has no intersection with the vmalloc range, so
 * register it separately when the two are distinct.
 */
static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
        if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
                kclist_add(&kcore_modules, (void *)MODULES_VADDR,
                        MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
        }
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
        proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
        if (!proc_root_kcore) {
                pr_err("couldn't create /proc/kcore\n");
                return 0; /* Always returns 0. */
        }
        /* Store text area if it's special */
        proc_kcore_text_init();
        /* Store vmalloc area */
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
        add_modules_range();
        /* Store direct-map area from physical memory map */
        kcore_update_ram();
        register_hotmemory_notifier(&kcore_callback_nb);

        return 0;
}
fs_initcall(proc_kcore_init);