linux/fs/proc/task_mmu.c
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

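/*
 * Generate the "VmPeak:" through "VmPTE:" lines of /proc/<pid>/status.
 * All figures are converted to kB before printing.
 */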
char *task_mem(struct mm_struct *mm, char *buffer)
{
        unsigned long data, text, lib;
        unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

        /*
         * Note: to minimize their overhead, mm maintains hiwater_vm and
         * hiwater_rss only when about to *lower* total_vm or rss.  Any
         * collector of these hiwater stats must therefore get total_vm
         * and rss too, which will usually be the higher.  Barriers? not
         * worth the effort, such snapshots can always be inconsistent.
         */
        hiwater_vm = total_vm = mm->total_vm;
        if (hiwater_vm < mm->hiwater_vm)
                hiwater_vm = mm->hiwater_vm;
        hiwater_rss = total_rss = get_mm_rss(mm);
        if (hiwater_rss < mm->hiwater_rss)
                hiwater_rss = mm->hiwater_rss;

        data = mm->total_vm - mm->shared_vm - mm->stack_vm;
        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
        buffer += sprintf(buffer,
                "VmPeak:\t%8lu kB\n"
                "VmSize:\t%8lu kB\n"
                "VmLck:\t%8lu kB\n"
                "VmHWM:\t%8lu kB\n"
                "VmRSS:\t%8lu kB\n"
                "VmData:\t%8lu kB\n"
                "VmStk:\t%8lu kB\n"
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n",
                hiwater_vm << (PAGE_SHIFT-10),
                (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
                hiwater_rss << (PAGE_SHIFT-10),
                total_rss << (PAGE_SHIFT-10),
                data << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
        return buffer;
}

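/* Size of the address space in bytes, reported as vsize in /proc/<pid>/stat. */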
unsigned long task_vsize(struct mm_struct *mm)
{
        return PAGE_SIZE * mm->total_vm;
}

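/*
 * Fill in the page counts behind /proc/<pid>/statm; the return value is
 * the total number of mapped pages.
 */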
int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
{
        *shared = get_mm_counter(mm, file_rss);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->total_vm - mm->shared_vm;
        *resident = *shared + get_mm_counter(mm, anon_rss);
        return mm->total_vm;
}

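/*
 * Resolve /proc/<pid>/exe: find the first file-backed VM_EXECUTABLE
 * mapping and return references to its vfsmount and dentry.
 */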
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
        struct vm_area_struct *vma;
        int result = -ENOENT;
        struct task_struct *task = get_proc_task(inode);
        struct mm_struct *mm = NULL;

        if (task) {
                mm = get_task_mm(task);
                put_task_struct(task);
        }
        if (!mm)
                goto out;
        down_read(&mm->mmap_sem);

        vma = mm->mmap;
        while (vma) {
                if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
                        break;
                vma = vma->vm_next;
        }

        if (vma) {
                *mnt = mntget(vma->vm_file->f_path.mnt);
                *dentry = dget(vma->vm_file->f_path.dentry);
                result = 0;
        }

        up_read(&mm->mmap_sem);
        mmput(mm);
out:
        return result;
}

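/* Pad a maps line so that mapping names start in a fixed column. */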
static void pad_len_spaces(struct seq_file *m, int len)
{
        len = 25 + sizeof(void*) * 6 - len;
        if (len < 1)
                len = 1;
        seq_printf(m, "%*c", len, ' ');
}

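/* Per-vma counters for /proc/<pid>/smaps; kept in bytes, printed in kB. */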
struct mem_size_stats
{
        unsigned long resident;
        unsigned long shared_clean;
        unsigned long shared_dirty;
        unsigned long private_clean;
        unsigned long private_dirty;
        unsigned long referenced;
};

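/* Bundles a vma with the PTE-range callback and its private data. */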
struct pmd_walker {
        struct vm_area_struct *vma;
        void *private;
        void (*action)(struct vm_area_struct *, pmd_t *, unsigned long,
                       unsigned long, void *);
};

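/*
 * Show one line of /proc/<pid>/maps: address range, permissions, offset,
 * device, inode and name.  If @mss is non-NULL (the smaps case), the
 * per-vma size statistics are appended.
 */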
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
        struct proc_maps_private *priv = m->private;
        struct task_struct *task = priv->task;
        struct vm_area_struct *vma = v;
        struct mm_struct *mm = vma->vm_mm;
        struct file *file = vma->vm_file;
        int flags = vma->vm_flags;
        unsigned long ino = 0;
        dev_t dev = 0;
        int len;

        if (maps_protect && !ptrace_may_attach(task))
                return -EACCES;

        if (file) {
                struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
        }

        seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
                        vma->vm_start,
                        vma->vm_end,
                        flags & VM_READ ? 'r' : '-',
                        flags & VM_WRITE ? 'w' : '-',
                        flags & VM_EXEC ? 'x' : '-',
                        flags & VM_MAYSHARE ? 's' : 'p',
                        vma->vm_pgoff << PAGE_SHIFT,
                        MAJOR(dev), MINOR(dev), ino, &len);

        /*
         * Print the dentry name for named mappings, and a
         * special [heap] marker for the heap:
         */
        if (file) {
                pad_len_spaces(m, len);
                seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n");
        } else {
                const char *name = arch_vma_name(vma);
                if (!name) {
                        if (mm) {
                                if (vma->vm_start <= mm->start_brk &&
                                                vma->vm_end >= mm->brk) {
                                        name = "[heap]";
                                } else if (vma->vm_start <= mm->start_stack &&
                                           vma->vm_end >= mm->start_stack) {
                                        name = "[stack]";
                                }
                        } else {
                                name = "[vdso]";
                        }
                }
                if (name) {
                        pad_len_spaces(m, len);
                        seq_puts(m, name);
                }
        }
        seq_putc(m, '\n');

        if (mss)
                seq_printf(m,
                           "Size:           %8lu kB\n"
                           "Rss:            %8lu kB\n"
                           "Shared_Clean:   %8lu kB\n"
                           "Shared_Dirty:   %8lu kB\n"
                           "Private_Clean:  %8lu kB\n"
                           "Private_Dirty:  %8lu kB\n"
                           "Referenced:     %8lu kB\n",
                           (vma->vm_end - vma->vm_start) >> 10,
                           mss->resident >> 10,
                           mss->shared_clean  >> 10,
                           mss->shared_dirty  >> 10,
                           mss->private_clean >> 10,
                           mss->private_dirty >> 10,
                           mss->referenced >> 10);

        if (m->count < m->size)  /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
        return 0;
}

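/* /proc/<pid>/maps: the bare map line, without size statistics. */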
static int show_map(struct seq_file *m, void *v)
{
        return show_map_internal(m, v, NULL);
}

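/*
 * Accumulate smaps statistics over one PTE page table.  A page mapped
 * more than once (page_mapcount >= 2) counts as shared, otherwise as
 * private; pte_dirty() splits each class into clean and dirty.
 */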
static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                            unsigned long addr, unsigned long end,
                            void *private)
{
        struct mem_size_stats *mss = private;
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;
                if (!pte_present(ptent))
                        continue;

                mss->resident += PAGE_SIZE;

                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;

                /* Accumulate the size in pages that have been accessed. */
                if (pte_young(ptent) || PageReferenced(page))
                        mss->referenced += PAGE_SIZE;
                if (page_mapcount(page) >= 2) {
                        if (pte_dirty(ptent))
                                mss->shared_dirty += PAGE_SIZE;
                        else
                                mss->shared_clean += PAGE_SIZE;
                } else {
                        if (pte_dirty(ptent))
                                mss->private_dirty += PAGE_SIZE;
                        else
                                mss->private_clean += PAGE_SIZE;
                }
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
}

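/*
 * Reset the referenced state over one PTE page table for
 * /proc/<pid>/clear_refs: clear the pte young bit and the page's
 * PG_referenced flag, so that only new references show up in smaps.
 */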
static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                 unsigned long addr, unsigned long end,
                                 void *private)
{
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;
                if (!pte_present(ptent))
                        continue;

                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;

                /* Clear accessed and referenced bits. */
                ptep_test_and_clear_young(vma, addr, pte);
                ClearPageReferenced(page);
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
}

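/*
 * walk_pmd_range() and walk_pud_range() descend one page-table level
 * each, skipping holes, until the walker's action can be applied to
 * every bottom-level (PTE) page table in the range.
 */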
static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
                                  unsigned long addr, unsigned long end)
{
        pmd_t *pmd;
        unsigned long next;

        for (pmd = pmd_offset(pud, addr); addr != end;
             pmd++, addr = next) {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                walker->action(walker->vma, pmd, addr, next, walker->private);
        }
}

static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
                                  unsigned long addr, unsigned long end)
{
        pud_t *pud;
        unsigned long next;

        for (pud = pud_offset(pgd, addr); addr != end;
             pud++, addr = next) {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                walk_pmd_range(walker, pud, addr, next);
        }
}

/**
 * walk_page_range - walk the page tables of a VMA with a callback
 * @vma: VMA to walk
 * @action: callback invoked for every bottom-level (PTE) page table
 * @private: private data passed to the callback function
 *
 * Recursively walk the page table for the memory area in a VMA, calling
 * a callback for every bottom-level (PTE) page table.
 */
static inline void walk_page_range(struct vm_area_struct *vma,
                                   void (*action)(struct vm_area_struct *,
                                                  pmd_t *, unsigned long,
                                                  unsigned long, void *),
                                   void *private)
{
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
        struct pmd_walker walker = {
                .vma            = vma,
                .private        = private,
                .action         = action,
        };
        pgd_t *pgd;
        unsigned long next;

        for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
             pgd++, addr = next) {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                walk_pud_range(&walker, pgd, addr, next);
        }
}

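/*
 * /proc/<pid>/smaps: gather the statistics for this vma, then print the
 * map line followed by the statistics.  Hugetlb areas are not walked,
 * so their counters remain zero.
 */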
static int show_smap(struct seq_file *m, void *v)
{
        struct vm_area_struct *vma = v;
        struct mem_size_stats mss;

        memset(&mss, 0, sizeof mss);
        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
                walk_page_range(vma, smaps_pte_range, &mss);
        return show_map_internal(m, v, &mss);
}

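/*
 * Clear the referenced bits over a whole address space, then flush the
 * TLB so the cleared young bits take effect.  This backs writes to
 * /proc/<pid>/clear_refs.
 */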
void clear_refs_smap(struct mm_struct *mm)
{
        struct vm_area_struct *vma;

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                if (vma->vm_mm && !is_vm_hugetlb_page(vma))
                        walk_page_range(vma, clear_refs_pte_range, NULL);
        flush_tlb_mm(mm);
        up_read(&mm->mmap_sem);
}

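/*
 * seq_file start: pin the task and mm, take mmap_sem for reading and
 * return the vma at *pos.  m->version, the last address shown, is used
 * as a hint to restart quickly via find_vma().
 */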
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        unsigned long last_addr = m->version;
        struct mm_struct *mm;
        struct vm_area_struct *vma, *tail_vma = NULL;
        loff_t l = *pos;

        /* Clear the per syscall fields in priv */
        priv->task = NULL;
        priv->tail_vma = NULL;

        /*
         * We remember last_addr rather than next_addr to hit with
         * mmap_cache most of the time. We have zero last_addr at
         * the beginning and also after lseek. We will have -1 last_addr
         * after the end of the vmas.
         */

        if (last_addr == -1UL)
                return NULL;

        priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
        if (!priv->task)
                return NULL;

        mm = mm_for_maps(priv->task);
        if (!mm)
                return NULL;

        priv->tail_vma = tail_vma = get_gate_vma(priv->task);

        /* Start with last addr hint */
        if (last_addr && (vma = find_vma(mm, last_addr))) {
                vma = vma->vm_next;
                goto out;
        }

        /*
         * Check that the vma index is within range and do a
         * sequential scan up to that index.
         */
        vma = NULL;
        if ((unsigned long)l < mm->map_count) {
                vma = mm->mmap;
                while (l-- && vma)
                        vma = vma->vm_next;
                goto out;
        }

        if (l != mm->map_count)
                tail_vma = NULL; /* After gate vma */

out:
        if (vma)
                return vma;

        /* End of vmas has been reached */
        m->version = (tail_vma != NULL)? 0: -1UL;
        up_read(&mm->mmap_sem);
        mmput(mm);
        return tail_vma;
}

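/*
 * Drop mmap_sem and the mm reference, unless m_start() already dropped
 * them when it returned the tail (gate) vma.
 */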
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
        if (vma && vma != priv->tail_vma) {
                struct mm_struct *mm = vma->vm_mm;
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
}

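/* Advance to the next vma, fall through to the gate vma, then stop. */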
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;
        struct vm_area_struct *tail_vma = priv->tail_vma;

        (*pos)++;
        if (vma && (vma != tail_vma) && vma->vm_next)
                return vma->vm_next;
        vma_stop(priv, vma);
        return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;

        vma_stop(priv, vma);
        if (priv->task)
                put_task_struct(priv->task);
}

static struct seq_operations proc_pid_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
};

static struct seq_operations proc_pid_smaps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_smap
};

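/*
 * Common open routine for maps, smaps and numa_maps: allocate the
 * per-open private state, record the pid and attach it to the seq_file.
 */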
static int do_maps_open(struct inode *inode, struct file *file,
                        struct seq_operations *ops)
{
        struct proc_maps_private *priv;
        int ret = -ENOMEM;
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv) {
                priv->pid = proc_pid(inode);
                ret = seq_open(file, ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = priv;
                } else {
                        kfree(priv);
                }
        }
        return ret;
}

static int maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
        .open           = maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

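/*
 * /proc/<pid>/numa_maps reuses the iterator above; show_numa_map()
 * itself is defined in mm/mempolicy.c.
 */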
#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static int show_numa_map_checked(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;
        struct task_struct *task = priv->task;

        if (maps_protect && !ptrace_may_attach(task))
                return -EACCES;

        return show_numa_map(m, v);
}

static struct seq_operations proc_pid_numa_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_numa_map_checked
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
        .open           = numa_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};
#endif

static int smaps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
        .open           = smaps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};