linux/mm/mincore.c
// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>

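/*
 * Hugetlb entry callback for the page walk: report every page in the
 * hugepage-backed range [addr, end) as resident when the huge PTE is
 * populated, and advance the per-walk result vector accordingly.
 */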
static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;

	/*
	 * Huge pages mapped into a user process are always resident in RAM
	 * and never swapped out, but in principle the PTE still needs to be
	 * checked.
	 */
	present = pte && !huge_pte_none(huge_ptep_get(pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
#else
	BUG();
#endif
	return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	page = find_get_incore_page(mapping, index);
	if (page) {
		present = PageUptodate(page);
		put_page(page);
	}

	return present;
}

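/*
 * Fill @vec for a range with no page table entries: for file-backed VMAs,
 * consult the page cache for each offset; for anonymous VMAs, unmapped
 * pages can never be resident, so report 0.  Returns the number of pages
 * (i.e. vector entries) covered.
 */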
static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

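/* The ->pte_hole callback: handle holes in the page tables and advance vec. */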
static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   __always_unused int depth,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

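/*
 * The ->pmd_entry callback.  Three cases are handled per PMD: a mapped
 * transparent huge page (the whole range is resident), an unstable PMD
 * (fall back to treating the range as unmapped), and a normal PTE page,
 * which is scanned one entry at a time, looking through swap entries to
 * the swap cache where necessary.
 */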
static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmd)) {
		__mincore_unmapped_range(addr, end, vma, vec);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		pte_t pte = *ptep;

		if (pte_none(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte))
			*vec = 1;
		else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swp_offset(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

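/*
 * Decide whether the caller may learn residency information for @vma.
 * Anonymous mappings are always fine; for file-backed mappings the caller
 * must own the file or be able to open it for writing, so that shared
 * page cache state is not leaked to unrelated processes.
 */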
static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(&init_user_ns,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

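/* Callbacks wiring the routines above into walk_page_range(). */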
static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry		= mincore_pte_range,
	.pte_hole		= mincore_unmapped_range,
	.hugetlb_entry		= mincore_hugetlb,
};

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments and we hold the mmap lock: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	if (!can_do_mincore(vma)) {
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
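		/*
		 * Worked example: with 4 KiB pages that is 4096 one-byte
		 * entries, so a single pass covers at most 16 MiB of
		 * address space and fills at most one page of tmp.
		 */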
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
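
/*
 * Illustrative userspace usage (not part of this file): a minimal sketch of
 * how a program might call mincore(2) on an anonymous mapping and interpret
 * the result vector.  Error handling is abbreviated and the sizes chosen
 * here are arbitrary.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		size_t len = 8 * page;
 *		size_t npages = len / page;
 *		unsigned char *vec = malloc(npages);
 *		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (buf == MAP_FAILED || !vec)
 *			return 1;
 *
 *		buf[0] = 1;	// fault in the first page only
 *		if (mincore(buf, len, vec) == 0) {
 *			for (size_t i = 0; i < npages; i++)
 *				printf("page %zu: %s\n", i,
 *				       (vec[i] & 1) ? "resident" : "not resident");
 *		}
 *		munmap(buf, len);
 *		free(vec);
 *		return 0;
 *	}
 */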