/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *       Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)     ((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)                 \
        ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_rmid_forced = 0;
        ns->shm_tot = 0;
        ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct shmid_kernel *shp;
        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                shp->shm_perm.key = IPC_PRIVATE;
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
        idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
        shm_init_ns(&init_ipc_ns);
        return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
        ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
                                "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
                                "       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
        rcu_read_lock();
        ipc_lock_object(&ipcp->shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
                                                int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_atim = get_seconds();
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_nattch++;
        shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(ns, shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shp->shm_file))
                shmem_lock(shp->shm_file, 0, shp->mlock_user);
        else if (shp->mlock_user)
                user_shm_unlock(file_inode(shp->shm_file)->i_size,
                                                shp->mlock_user);
        fput(shp->shm_file);
        security_shm_free(shp);
        ipc_rcu_putref(shp);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        return (shp->shm_nattch == 0) &&
               (ns->shm_rmid_forced ||
                (shp->shm_perm.mode & SHM_DEST));
}
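
/*
 * Illustration (userspace sketch, not part of this file): how the two
 * destruction triggers above interact. A segment marked for removal keeps
 * working for already-attached processes and is only freed on the last
 * detach:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);	// shm_nattch becomes 1
 *	shmctl(id, IPC_RMID, NULL);	// sets SHM_DEST, hides the key
 *	shmdt(p);			// nattch drops to 0 -> shm_destroy()
 *
 * With the sysctl kernel.shm_rmid_forced set to 1, the explicit IPC_RMID
 * is unnecessary: the last detach alone makes shm_may_destroy() true.
 */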

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        down_write(&shm_ids(ns).rw_mutex);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock(ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_dtim = get_seconds();
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
        struct ipc_namespace *ns = data;
        struct kern_ipc_perm *ipcp = p;
        struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_creator != current)
                return 0;

        /*
         * Mark it as orphaned to destroy the segment when
         * kernel.shm_rmid_forced is changed.
         * It is a noop if the following shm_may_destroy() returns true.
         */
        shp->shm_creator = NULL;

        /*
         * Don't even try to destroy it.  If shm_rmid_forced=0 and IPC_RMID
         * is not set, it shouldn't be deleted here.
         */
        if (!ns->shm_rmid_forced)
                return 0;

        if (shm_may_destroy(ns, shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
        return 0;
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
        struct ipc_namespace *ns = data;
        struct kern_ipc_perm *ipcp = p;
        struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        /*
         * We want to destroy segments that have no users and whose
         * originating process has already exited.
         *
         * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
         */
        if (shp->shm_creator != NULL)
                return 0;

        if (shm_may_destroy(ns, shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
        return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
        down_write(&shm_ids(ns).rw_mutex);
        if (shm_ids(ns).in_use)
                idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
        up_write(&shm_ids(ns).rw_mutex);
}


void exit_shm(struct task_struct *task)
{
        struct ipc_namespace *ns = task->nsproxy->ipc_ns;

        if (shm_ids(ns).in_use == 0)
                return;

        /* Destroy all segments this task created that are no longer mapped */
        down_write(&shm_ids(ns).rw_mutex);
        if (shm_ids(ns).in_use)
                idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
        up_write(&shm_ids(ns).rw_mutex);
}

/* Delegate page faults to the backing file's fault handler. */
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;
        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;

        return pol;
}
#endif

/*
 * Map the underlying shmem/hugetlb file, then interpose shm_vm_ops so
 * that attach/detach accounting happens on every open/close of the vma.
 */
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        ret = sfd->file->f_op->mmap(sfd->file, vma);
        if (ret != 0)
                return ret;
        sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
        BUG_ON(!sfd->vm_ops->fault);
#endif
        vma->vm_ops = &shm_vm_ops;
        shm_open(vma);

        return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fsync)
                return -EINVAL;
        return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
                          loff_t len)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fallocate)
                return -EOPNOTSUPP;
        return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);
        return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
                                                pgoff, flags);
}

static const struct file_operations shm_file_operations = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
#ifndef CONFIG_MMU
        .get_unmapped_area      = shm_get_unmapped_area,
#endif
        .llseek         = noop_llseek,
        .fallocate      = shm_fallocate,
};

static const struct file_operations shm_file_operations_huge = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
        .get_unmapped_area      = shm_get_unmapped_area,
        .llseek         = noop_llseek,
        .fallocate      = shm_fallocate,
};

int is_file_shm_hugepages(struct file *file)
{
        return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
        .open   = shm_open,     /* callback for a new vm-area open */
        .close  = shm_close,    /* callback for when the vm-area is released */
        .fault  = shm_fault,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
        key_t key = params->key;
        int shmflg = params->flg;
        size_t size = params->u.size;
        int error;
        struct shmid_kernel *shp;
        size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        int id;
        vm_flags_t acctflag = 0;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;

        if (ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = ipc_rcu_alloc(sizeof(*shp));
        if (!shp)
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_user = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
                ipc_rcu_putref(shp);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                struct hstate *hs;
                size_t hugesize;

                hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
                if (!hs) {
                        error = -EINVAL;
                        goto no_file;
                }
                hugesize = ALIGN(size, huge_page_size(hs));

                /* hugetlb_file_setup applies strict accounting */
                if (shmflg & SHM_NORESERVE)
                        acctflag = VM_NORESERVE;
                file = hugetlb_file_setup(name, hugesize, acctflag,
                                  &shp->mlock_user, HUGETLB_SHMFS_INODE,
                                (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
        } else {
                /*
                 * Do not honour SHM_NORESERVE (i.e. no accounting) when
                 * OVERCOMMIT_NEVER is in effect, even if it's asked for.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = VM_NORESERVE;
                file = shmem_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (id < 0) {
                error = id;
                goto no_id;
        }

        shp->shm_cprid = task_tgid_vnr(current);
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = get_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_file = file;
        shp->shm_creator = current;

        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file_inode(file)->i_ino = shp->shm_perm.id;

        ns->shm_tot += numpages;
        error = shp->shm_perm.id;

        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();
        return error;

no_id:
        if (is_file_hugepages(file) && shp->mlock_user)
                user_shm_unlock(size, shp->mlock_user);
        fput(file);
no_file:
        security_shm_free(shp);
        ipc_rcu_putref(shp);
        return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
                                struct ipc_params *params)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_segsz < params->u.size)
                return -EINVAL;

        return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
        struct ipc_namespace *ns;
        struct ipc_ops shm_ops;
        struct ipc_params shm_params;

        ns = current->nsproxy->ipc_ns;

        shm_ops.getnew = newseg;
        shm_ops.associate = shm_security;
        shm_ops.more_checks = shm_more_checks;

        shm_params.key = key;
        shm_params.flg = shmflg;
        shm_params.u.size = size;

        return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
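
/*
 * Illustration (userspace sketch, not part of this file): a typical call
 * into the shmget() path above. Sizes are accounted in whole pages (see
 * numpages in newseg()), so even a 1-byte request charges one page against
 * shm_ctlall:
 *
 *	key_t key = ftok("/some/agreed/path", 'x');	// rendezvous key
 *	int id = shmget(key, 1, IPC_CREAT | IPC_EXCL | 0600);
 *	if (id < 0)
 *		perror("shmget");	// e.g. EINVAL if size > shmmax
 */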

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shmid_ds out;

                memset(&out, 0, sizeof(out));
                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz   = in->shm_segsz;
                out.shm_atime   = in->shm_atime;
                out.shm_dtime   = in->shm_dtime;
                out.shm_ctime   = in->shm_ctime;
                out.shm_cpid    = in->shm_cpid;
                out.shm_lpid    = in->shm_lpid;
                out.shm_nattch  = in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
            {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->shm_perm.uid       = tbuf_old.shm_perm.uid;
                out->shm_perm.gid       = tbuf_old.shm_perm.gid;
                out->shm_perm.mode      = tbuf_old.shm_perm.mode;

                return 0;
            }
        default:
                return -EINVAL;
        }
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin      = in->shmmin;
                out.shmmni      = in->shmmni;
                out.shmseg      = in->shmseg;
                out.shmall      = in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
        unsigned long *rss_add, unsigned long *swp_add)
{
        struct inode *inode;

        inode = file_inode(shp->shm_file);

        if (is_file_hugepages(shp->shm_file)) {
                struct address_space *mapping = inode->i_mapping;
                struct hstate *h = hstate_file(shp->shm_file);
                *rss_add += pages_per_huge_page(h) * mapping->nrpages;
        } else {
#ifdef CONFIG_SHMEM
                struct shmem_inode_info *info = SHMEM_I(inode);
                spin_lock(&info->lock);
                *rss_add += inode->i_mapping->nrpages;
                *swp_add += info->swapped;
                spin_unlock(&info->lock);
#else
                *rss_add += inode->i_mapping->nrpages;
#endif
        }
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                unsigned long *swp)
{
        int next_id;
        int total, in_use;

        *rss = 0;
        *swp = 0;

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct kern_ipc_perm *ipc;
                struct shmid_kernel *shp;

                ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (ipc == NULL)
                        continue;
                shp = container_of(ipc, struct shmid_kernel, shm_perm);

                shm_add_rss_swap(shp, rss, swp);

                total++;
        }
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
                       struct shmid_ds __user *buf, int version)
{
        struct kern_ipc_perm *ipcp;
        struct shmid64_ds shmid64;
        struct shmid_kernel *shp;
        int err;

        if (cmd == IPC_SET) {
                if (copy_shmid_from_user(&shmid64, buf, version))
                        return -EFAULT;
        }

        down_write(&shm_ids(ns).rw_mutex);
        rcu_read_lock();

        ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
                               &shmid64.shm_perm, 0);
        if (IS_ERR(ipcp)) {
                err = PTR_ERR(ipcp);
                /* the ipc lock is not held upon failure */
                goto out_unlock1;
        }

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        err = security_shm_shmctl(shp, cmd);
        if (err)
                goto out_unlock0;

        switch (cmd) {
        case IPC_RMID:
                /* do_shm_rmid unlocks the ipc object and rcu */
                do_shm_rmid(ns, ipcp);
                goto out_up;
        case IPC_SET:
                err = ipc_update_perm(&shmid64.shm_perm, ipcp);
                if (err)
                        goto out_unlock0;
                shp->shm_ctim = get_seconds();
                break;
        default:
                err = -EINVAL;
        }

out_unlock0:
        ipc_unlock_object(&shp->shm_perm);
out_unlock1:
        rcu_read_unlock();
out_up:
        up_write(&shm_ids(ns).rw_mutex);
        return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
        struct shmid_kernel *shp;
        int err, version;
        struct ipc_namespace *ns;

        if (cmd < 0 || shmid < 0) {
                err = -EINVAL;
                goto out;
        }

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo64 shminfo;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shminfo, 0, sizeof(shminfo));
                shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
                shminfo.shmmax = ns->shm_ctlmax;
                shminfo.shmall = ns->shm_ctlall;

                shminfo.shmmin = SHMMIN;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        return -EFAULT;

                down_read(&shm_ids(ns).rw_mutex);
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);

                if (err < 0)
                        err = 0;
                goto out;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shm_info, 0, sizeof(shm_info));
                down_read(&shm_ids(ns).rw_mutex);
                shm_info.used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
                shm_info.shm_tot = ns->shm_tot;
                shm_info.swap_attempts = 0;
                shm_info.swap_successes = 0;
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);
                if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
                        err = -EFAULT;
                        goto out;
                }

                err = err < 0 ? 0 : err;
                goto out;
        }
        case SHM_STAT:
        case IPC_STAT:
        {
                struct shmid64_ds tbuf;
                int result;

                if (cmd == SHM_STAT) {
                        shp = shm_lock(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = shp->shm_perm.id;
                } else {
                        shp = shm_lock_check(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = 0;
                }
                err = -EACCES;
                if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
                        goto out_unlock;
                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;
                memset(&tbuf, 0, sizeof(tbuf));
                kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
                tbuf.shm_segsz  = shp->shm_segsz;
                tbuf.shm_atime  = shp->shm_atim;
                tbuf.shm_dtime  = shp->shm_dtim;
                tbuf.shm_ctime  = shp->shm_ctim;
                tbuf.shm_cpid   = shp->shm_cprid;
                tbuf.shm_lpid   = shp->shm_lprid;
                tbuf.shm_nattch = shp->shm_nattch;
                shm_unlock(shp);
                if (copy_shmid_to_user(buf, &tbuf, version))
                        err = -EFAULT;
                else
                        err = result;
                goto out;
        }
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
                struct file *shm_file;

                shp = shm_lock_check(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out;
                }

                audit_ipc_obj(&(shp->shm_perm));

                if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
                        kuid_t euid = current_euid();
                        err = -EPERM;
                        if (!uid_eq(euid, shp->shm_perm.uid) &&
                            !uid_eq(euid, shp->shm_perm.cuid))
                                goto out_unlock;
                        if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
                                goto out_unlock;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;

                shm_file = shp->shm_file;
                if (is_file_hugepages(shm_file))
                        goto out_unlock;

                if (cmd == SHM_LOCK) {
                        struct user_struct *user = current_user();
                        err = shmem_lock(shm_file, 1, user);
                        if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
                                shp->shm_perm.mode |= SHM_LOCKED;
                                shp->mlock_user = user;
                        }
                        goto out_unlock;
                }

                /* SHM_UNLOCK */
                if (!(shp->shm_perm.mode & SHM_LOCKED))
                        goto out_unlock;
                shmem_lock(shm_file, 0, shp->mlock_user);
                shp->shm_perm.mode &= ~SHM_LOCKED;
                shp->mlock_user = NULL;
                get_file(shm_file);
                shm_unlock(shp);
                shmem_unlock_mapping(shm_file->f_mapping);
                fput(shm_file);
                goto out;
        }
        case IPC_RMID:
        case IPC_SET:
                err = shmctl_down(ns, shmid, cmd, buf, version);
                return err;
        default:
                return -EINVAL;
        }

out_unlock:
        shm_unlock(shp);
out:
        return err;
}
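
/*
 * Illustration (userspace sketch, not part of this file): the IPC_STAT
 * branch above fills a shmid64_ds and copies it out through
 * copy_shmid_to_user(); from userspace that looks like:
 *
 *	struct shmid_ds ds;
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("size=%lu nattch=%lu\n",
 *		       (unsigned long)ds.shm_segsz,
 *		       (unsigned long)ds.shm_nattch);
 */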

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
              unsigned long shmlba)
{
        struct shmid_kernel *shp;
        unsigned long addr;
        unsigned long size;
        struct file *file;
        int err;
        unsigned long flags;
        unsigned long prot;
        int acc_mode;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        struct path path;
        fmode_t f_mode;
        unsigned long populate = 0;

        err = -EINVAL;
        if (shmid < 0)
                goto out;
        else if ((addr = (ulong)shmaddr)) {
                if (addr & (shmlba - 1)) {
                        if (shmflg & SHM_RND)
                                addr &= ~(shmlba - 1);     /* round down */
                        else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }
                flags = MAP_SHARED | MAP_FIXED;
        } else {
                if ((shmflg & SHM_REMAP))
                        goto out;

                flags = MAP_SHARED;
        }

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_mode = FMODE_READ;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_mode = FMODE_READ | FMODE_WRITE;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        shp = shm_lock_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out;
        }

        err = -EACCES;
        if (ipcperms(ns, &shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(shp, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        path = shp->shm_file->f_path;
        path_get(&path);
        shp->shm_nattch++;
        size = i_size_read(path.dentry->d_inode);
        shm_unlock(shp);

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd)
                goto out_put_dentry;

        file = alloc_file(&path, f_mode,
                          is_file_hugepages(shp->shm_file) ?
                                &shm_file_operations_huge :
                                &shm_file_operations);
        err = PTR_ERR(file);
        if (IS_ERR(file))
                goto out_free;

        file->private_data = sfd;
        file->f_mapping = shp->shm_file->f_mapping;
        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = shp->shm_file;
        sfd->vm_ops = NULL;

        err = security_mmap_file(file, prot, flags);
        if (err)
                goto out_fput;

        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
                /*
                 * If shm segment goes below stack, make sure there is some
                 * space left for the stack to grow (at least 4 pages).
                 */
                if (addr < current->mm->start_stack &&
                    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
                        goto invalid;
        }

        addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
        *raddr = addr;
        err = 0;
        if (IS_ERR_VALUE(addr))
                err = (long)addr;
invalid:
        up_write(&current->mm->mmap_sem);
        if (populate)
                mm_populate(addr, populate);

out_fput:
        fput(file);

out_nattch:
        down_write(&shm_ids(ns).rw_mutex);
        shp = shm_lock(ns, shmid);
        BUG_ON(IS_ERR(shp));
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);

out:
        return err;

out_unlock:
        shm_unlock(shp);
        goto out;

out_free:
        kfree(sfd);
out_put_dentry:
        path_put(&path);
        goto out_nattch;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}
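
/*
 * Illustration (userspace sketch, not part of this file): the flag
 * handling in do_shmat(). A NULL address lets the kernel choose one;
 * SHM_RND rounds a misaligned hint down to an SHMLBA boundary instead
 * of failing with EINVAL:
 *
 *	void *p = shmat(id, NULL, SHM_RDONLY);	// kernel picks the address
 *	void *q = shmat(id, hint, SHM_RND);	// "hint" is some chosen
 *						// address, rounded to SHMLBA
 *	if (p == (void *)-1)
 *		perror("shmat");
 */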

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = (unsigned long)shmaddr;
        int retval = -EINVAL;
#ifdef CONFIG_MMU
        loff_t size = 0;
        struct vm_area_struct *next;
#endif

        if (addr & ~PAGE_MASK)
                return retval;

        down_write(&mm->mmap_sem);

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: It searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records its size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         */
        /*
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or it
                 * otherwise starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

                        size = file_inode(vma->vm_file)->i_size;
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                vma = next;
        }

#else /* CONFIG_MMU */
        /*
         * Under NOMMU conditions, the exact address to be destroyed must
         * be given.
         */
        retval = -EINVAL;
        if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
                do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                retval = 0;
        }

#endif

        up_write(&mm->mmap_sem);
        return retval;
}
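
/*
 * Illustration (userspace sketch, not part of this file): shmdt() wants
 * the page-aligned start address that shmat() returned; by the vm_pgoff
 * check above, an address inside the mapping does not match:
 *
 *	void *p = shmat(id, NULL, 0);
 *	shmdt((char *)p + 4096);	// fails with EINVAL: not the start
 *	shmdt(p);			// detaches the whole segment
 */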

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct user_namespace *user_ns = seq_user_ns(s);
        struct shmid_kernel *shp = it;
        unsigned long rss = 0, swp = 0;

        shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

        return seq_printf(s,
                          "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
                          "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
                          SIZE_SPEC " " SIZE_SPEC "\n",
                          shp->shm_perm.key,
                          shp->shm_perm.id,
                          shp->shm_perm.mode,
                          shp->shm_segsz,
                          shp->shm_cprid,
                          shp->shm_lprid,
                          shp->shm_nattch,
                          from_kuid_munged(user_ns, shp->shm_perm.uid),
                          from_kgid_munged(user_ns, shp->shm_perm.gid),
                          from_kuid_munged(user_ns, shp->shm_perm.cuid),
                          from_kgid_munged(user_ns, shp->shm_perm.cgid),
                          shp->shm_atim,
                          shp->shm_dtim,
                          shp->shm_ctim,
                          rss * PAGE_SIZE,
                          swp * PAGE_SIZE);
}
#endif