linux/ipc/mqueue.c
/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
        struct ipc_namespace    *ipc_ns;
        bool                     newns; /* Set if newly created ipc namespace */
};

#define MQUEUE_MAGIC    0x19800202
#define DIRENT_SIZE     20
#define FILENT_SIZE     80

#define SEND            0
#define RECV            1

#define STATE_NONE      0
#define STATE_READY     1

struct posix_msg_tree_node {
        struct rb_node          rb_node;
        struct list_head        msg_list;
        int                     priority;
};

/*
 * Locking:
 *
 * Accesses to a message queue are synchronized by acquiring info->lock.
 *
 * There are two notable exceptions:
 * - The actual wakeup of a sleeping task is performed using the wake_q
 *   framework. info->lock is already released when wake_up_q is called.
 * - The exit codepaths after sleeping check ext_wait_queue->state without
 *   any locks. If it is STATE_READY, then the syscall is completed without
 *   acquiring info->lock.
 *
 * MQ_BARRIER:
 * To achieve proper release/acquire memory barrier pairing, the state is set to
 * STATE_READY with smp_store_release(), and it is read with READ_ONCE followed
 * by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe() is used.
 *
 * This prevents the following races:
 *
 * 1) With the simple wake_q_add(), the task could be gone already before
 *    the increase of the reference happens
 * Thread A
 *                              Thread B
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *                              wake_q_add(A)
 *                              if (cmpxchg()) // success
 *                                 ->state = STATE_READY (reordered)
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * sysret to user space
 * sys_exit()
 *                              get_task_struct() // UaF
 *
 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 * the smp_store_release() that does ->state = STATE_READY.
 *
 * 2) Without proper _release/_acquire barriers, the woken up task
 *    could read stale data
 *
 * Thread A
 *                              Thread B
 * do_mq_timedreceive
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *                              state = STATE_READY;
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * msg_ptr = wait.msg;          // Access to stale data!
 *                              receiver->msg = message; (reordered)
 *
 * Solution: use _release and _acquire barriers.
 *
 * 3) There is intentionally no barrier when setting current->state
 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 *    release memory barrier, and the wakeup is triggered when holding
 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 *    acquire memory barrier.
 */
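
/*
 * A minimal sketch of the pairing the MQ_BARRIER scheme above describes,
 * as implemented by __pipelined_op() (waker) and wq_sleep() (sleeper)
 * below.  The waker publishes ->msg before the release store, so the
 * lockless exit path never reads stale data:
 *
 *      Waker (holds info->lock)        Sleeper (lockless exit path)
 *      this->msg = message;
 *      get_task_struct(this->task);
 *      smp_store_release(&this->state,
 *                        STATE_READY);
 *                                      if (READ_ONCE(wait.state) ==
 *                                          STATE_READY) {
 *                                              smp_acquire__after_ctrl_dep();
 *                                              msg = wait.msg; (not stale)
 *                                      }
 *      wake_q_add_safe(wake_q, task);
 */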

struct ext_wait_queue {         /* queue of sleeping tasks */
        struct task_struct *task;
        struct list_head list;
        struct msg_msg *msg;    /* ptr of loaded message */
        int state;              /* one of STATE_* values */
};

struct mqueue_inode_info {
        spinlock_t lock;
        struct inode vfs_inode;
        wait_queue_head_t wait_q;

        struct rb_root msg_tree;
        struct rb_node *msg_tree_rightmost;
        struct posix_msg_tree_node *node_cache;
        struct mq_attr attr;

        struct sigevent notify;
        struct pid *notify_owner;
        u32 notify_self_exec_id;
        struct user_namespace *notify_user_ns;
        struct ucounts *ucounts;        /* user who created, for accounting */
        struct sock *notify_sock;
        struct sk_buff *notify_cookie;

        /* for tasks waiting for free space and messages, respectively */
        struct ext_wait_queue e_wait_q[2];

        unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
        return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
        return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
        struct ipc_namespace *ns;

        spin_lock(&mq_lock);
        ns = __get_ns_from_inode(inode);
        spin_unlock(&mq_lock);
        return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
        struct rb_node **p, *parent = NULL;
        struct posix_msg_tree_node *leaf;
        bool rightmost = true;

        p = &info->msg_tree.rb_node;
        while (*p) {
                parent = *p;
                leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

                if (likely(leaf->priority == msg->m_type))
                        goto insert_msg;
                else if (msg->m_type < leaf->priority) {
                        p = &(*p)->rb_left;
                        rightmost = false;
                } else
                        p = &(*p)->rb_right;
        }
        if (info->node_cache) {
                leaf = info->node_cache;
                info->node_cache = NULL;
        } else {
                leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
                if (!leaf)
                        return -ENOMEM;
                INIT_LIST_HEAD(&leaf->msg_list);
        }
        leaf->priority = msg->m_type;

        if (rightmost)
                info->msg_tree_rightmost = &leaf->rb_node;

        rb_link_node(&leaf->rb_node, parent, p);
        rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
        info->attr.mq_curmsgs++;
        info->qsize += msg->m_ts;
        list_add_tail(&msg->m_list, &leaf->msg_list);
        return 0;
}
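
/*
 * For illustration: nodes are keyed by priority and messages of equal
 * priority are appended FIFO, so sending messages with priorities 3, 1, 3
 * yields the receive order 3, 3, 1 via msg_get() below (highest priority
 * first, FIFO within a priority).
 */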

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
                                  struct mqueue_inode_info *info)
{
        struct rb_node *node = &leaf->rb_node;

        if (info->msg_tree_rightmost == node)
                info->msg_tree_rightmost = rb_prev(node);

        rb_erase(node, &info->msg_tree);
        if (info->node_cache)
                kfree(leaf);
        else
                info->node_cache = leaf;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
        struct rb_node *parent = NULL;
        struct posix_msg_tree_node *leaf;
        struct msg_msg *msg;

try_again:
        /*
         * During insert, low priorities go to the left and high to the
         * right.  On receive, we want the highest priorities first, so
         * walk all the way to the right.
         */
        parent = info->msg_tree_rightmost;
        if (!parent) {
                if (info->attr.mq_curmsgs) {
                        pr_warn_once("Inconsistency in POSIX message queue, "
                                     "no tree element, but supposedly messages "
                                     "should exist!\n");
                        info->attr.mq_curmsgs = 0;
                }
                return NULL;
        }
        leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
        if (unlikely(list_empty(&leaf->msg_list))) {
                pr_warn_once("Inconsistency in POSIX message queue, "
                             "empty leaf node but we haven't implemented "
                             "lazy leaf delete!\n");
                msg_tree_erase(leaf, info);
                goto try_again;
        } else {
                msg = list_first_entry(&leaf->msg_list,
                                       struct msg_msg, m_list);
                list_del(&msg->m_list);
                if (list_empty(&leaf->msg_list)) {
                        msg_tree_erase(leaf, info);
                }
        }
        info->attr.mq_curmsgs--;
        info->qsize -= msg->m_ts;
        return msg;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
                struct ipc_namespace *ipc_ns, umode_t mode,
                struct mq_attr *attr)
{
        struct inode *inode;
        int ret = -ENOMEM;

        inode = new_inode(sb);
        if (!inode)
                goto err;

        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

        if (S_ISREG(mode)) {
                struct mqueue_inode_info *info;
                unsigned long mq_bytes, mq_treesize;

                inode->i_fop = &mqueue_file_operations;
                inode->i_size = FILENT_SIZE;
                /* mqueue specific info */
                info = MQUEUE_I(inode);
                spin_lock_init(&info->lock);
                init_waitqueue_head(&info->wait_q);
                INIT_LIST_HEAD(&info->e_wait_q[0].list);
                INIT_LIST_HEAD(&info->e_wait_q[1].list);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
                info->qsize = 0;
                info->ucounts = NULL;   /* set when all is ok */
                info->msg_tree = RB_ROOT;
                info->msg_tree_rightmost = NULL;
                info->node_cache = NULL;
                memset(&info->attr, 0, sizeof(info->attr));
                info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
                                           ipc_ns->mq_msg_default);
                info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
                                            ipc_ns->mq_msgsize_default);
                if (attr) {
                        info->attr.mq_maxmsg = attr->mq_maxmsg;
                        info->attr.mq_msgsize = attr->mq_msgsize;
                }
                /*
                 * We used to allocate a static array of pointers and account
                 * the size of that array as well as one msg_msg struct per
                 * possible message into the queue size. That's no longer
                 * accurate as the queue is now an rbtree and will grow and
                 * shrink depending on usage patterns.  We can, however, still
                 * account one msg_msg struct per message, but the nodes are
                 * allocated depending on priority usage, and most programs
                 * only use one, or a handful, of priorities.  However, since
                 * this is pinned memory, we need to assume worst case, so
                 * that means the min(mq_maxmsg, max_priorities) * struct
                 * posix_msg_tree_node.
                 */

                ret = -EINVAL;
                if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
                        goto out_inode;
                if (capable(CAP_SYS_RESOURCE)) {
                        if (info->attr.mq_maxmsg > HARD_MSGMAX ||
                            info->attr.mq_msgsize > HARD_MSGSIZEMAX)
                                goto out_inode;
                } else {
                        if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
                                        info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
                                goto out_inode;
                }
                ret = -EOVERFLOW;
                /* check for overflow */
                if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
                        goto out_inode;
                mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                        min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
                        sizeof(struct posix_msg_tree_node);
                mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
                if (mq_bytes + mq_treesize < mq_bytes)
                        goto out_inode;
                mq_bytes += mq_treesize;
                info->ucounts = get_ucounts(current_ucounts());
                if (info->ucounts) {
                        long msgqueue;

                        spin_lock(&mq_lock);
                        msgqueue = inc_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
                        if (msgqueue == LONG_MAX || msgqueue > rlimit(RLIMIT_MSGQUEUE)) {
                                dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
                                spin_unlock(&mq_lock);
                                put_ucounts(info->ucounts);
                                info->ucounts = NULL;
                                /* mqueue_evict_inode() releases info->messages */
                                ret = -EMFILE;
                                goto out_inode;
                        }
                        spin_unlock(&mq_lock);
                }
        } else if (S_ISDIR(mode)) {
                inc_nlink(inode);
                /* Some things misbehave if size == 0 on a directory */
                inode->i_size = 2 * DIRENT_SIZE;
                inode->i_op = &mqueue_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
        }

        return inode;
out_inode:
        iput(inode);
err:
        return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
        struct inode *inode;
        struct ipc_namespace *ns = sb->s_fs_info;

        sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
        sb->s_blocksize = PAGE_SIZE;
        sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = MQUEUE_MAGIC;
        sb->s_op = &mqueue_super_ops;

        inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
                return -ENOMEM;
        return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx = fc->fs_private;

        /*
         * With a newly created ipc namespace, we don't need to do a search
         * for an ipc namespace match, but we still need to set s_fs_info.
         */
        if (ctx->newns) {
                fc->s_fs_info = ctx->ipc_ns;
                return get_tree_nodev(fc, mqueue_fill_super);
        }
        return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx = fc->fs_private;

        put_ipc_ns(ctx->ipc_ns);
        kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx;

        ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
        put_user_ns(fc->user_ns);
        fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
        fc->fs_private = ctx;
        fc->ops = &mqueue_fs_context_ops;
        return 0;
}

/*
 * mq_init_ns() is currently the only caller of mq_create_mount().
 * So the ns parameter is always a newly created ipc namespace.
 */
static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
        struct mqueue_fs_context *ctx;
        struct fs_context *fc;
        struct vfsmount *mnt;

        fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
        if (IS_ERR(fc))
                return ERR_CAST(fc);

        ctx = fc->fs_private;
        ctx->newns = true;
        put_ipc_ns(ctx->ipc_ns);
        ctx->ipc_ns = get_ipc_ns(ns);
        put_user_ns(fc->user_ns);
        fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

        mnt = fc_mount(fc);
        put_fs_context(fc);
        return mnt;
}

static void init_once(void *foo)
{
        struct mqueue_inode_info *p = foo;

        inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
        struct mqueue_inode_info *ei;

        ei = alloc_inode_sb(sb, mqueue_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
        kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
        struct mqueue_inode_info *info;
        struct ipc_namespace *ipc_ns;
        struct msg_msg *msg, *nmsg;
        LIST_HEAD(tmp_msg);

        clear_inode(inode);

        if (S_ISDIR(inode->i_mode))
                return;

        ipc_ns = get_ns_from_inode(inode);
        info = MQUEUE_I(inode);
        spin_lock(&info->lock);
        while ((msg = msg_get(info)) != NULL)
                list_add_tail(&msg->m_list, &tmp_msg);
        kfree(info->node_cache);
        spin_unlock(&info->lock);

        list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
                list_del(&msg->m_list);
                free_msg(msg);
        }

        if (info->ucounts) {
                unsigned long mq_bytes, mq_treesize;

                /* Total amount of bytes accounted for the mqueue */
                mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                        min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
                        sizeof(struct posix_msg_tree_node);

                mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
                                          info->attr.mq_msgsize);

                spin_lock(&mq_lock);
                dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
                /*
                 * get_ns_from_inode() ensures that the
                 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
                 * to which we now hold a reference, or it is NULL.
                 * We can't put it here under mq_lock, though.
                 */
                if (ipc_ns)
                        ipc_ns->mq_queues_count--;
                spin_unlock(&mq_lock);
                put_ucounts(info->ucounts);
                info->ucounts = NULL;
        }
        if (ipc_ns)
                put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
        struct inode *dir = dentry->d_parent->d_inode;
        struct inode *inode;
        struct mq_attr *attr = arg;
        int error;
        struct ipc_namespace *ipc_ns;

        spin_lock(&mq_lock);
        ipc_ns = __get_ns_from_inode(dir);
        if (!ipc_ns) {
                error = -EACCES;
                goto out_unlock;
        }

        if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
            !capable(CAP_SYS_RESOURCE)) {
                error = -ENOSPC;
                goto out_unlock;
        }
        ipc_ns->mq_queues_count++;
        spin_unlock(&mq_lock);

        inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
        if (IS_ERR(inode)) {
                error = PTR_ERR(inode);
                spin_lock(&mq_lock);
                ipc_ns->mq_queues_count--;
                goto out_unlock;
        }

        put_ipc_ns(ipc_ns);
        dir->i_size += DIRENT_SIZE;
        dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

        d_instantiate(dentry, inode);
        dget(dentry);
        return 0;
out_unlock:
        spin_unlock(&mq_lock);
        if (ipc_ns)
                put_ipc_ns(ipc_ns);
        return error;
}

static int mqueue_create(struct user_namespace *mnt_userns, struct inode *dir,
                         struct dentry *dentry, umode_t mode, bool excl)
{
        return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = d_inode(dentry);

        dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
        dir->i_size -= DIRENT_SIZE;
        drop_nlink(inode);
        dput(dentry);
        return 0;
}

/*
 * This routine handles reads from a queue file.
 * To avoid duplicating some form of mq_receive here, we only allow
 * reading the queue size and notification info (the only values that
 * are interesting from the user's point of view and aren't accessible
 * through the standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
                                size_t count, loff_t *off)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
        char buffer[FILENT_SIZE];
        ssize_t ret;

        spin_lock(&info->lock);
        snprintf(buffer, sizeof(buffer),
                        "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
                        info->qsize,
                        info->notify_owner ? info->notify.sigev_notify : 0,
                        (info->notify_owner &&
                         info->notify.sigev_notify == SIGEV_SIGNAL) ?
                                info->notify.sigev_signo : 0,
                        pid_vnr(info->notify_owner));
        spin_unlock(&info->lock);
        buffer[sizeof(buffer)-1] = '\0';

        ret = simple_read_from_buffer(u_data, count, off, buffer,
                                strlen(buffer));
        if (ret <= 0)
                return ret;

        file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
        return ret;
}
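
/*
 * For illustration, with the queue filesystem mounted at /dev/mqueue,
 * reading a queue file returns a single line in the format built above,
 * e.g. (values are hypothetical):
 *
 *      $ cat /dev/mqueue/myqueue
 *      QSIZE:129     NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 */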

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

        spin_lock(&info->lock);
        if (task_tgid(current) == info->notify_owner)
                remove_notification(info);

        spin_unlock(&info->lock);
        return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
        __poll_t retval = 0;

        poll_wait(filp, &info->wait_q, poll_tab);

        spin_lock(&info->lock);
        if (info->attr.mq_curmsgs)
                retval = EPOLLIN | EPOLLRDNORM;

        if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
                retval |= EPOLLOUT | EPOLLWRNORM;
        spin_unlock(&info->lock);

        return retval;
}

/*
 * Adds current to info->e_wait_q[sr] before the first element with a
 * smaller prio.
 */
static void wq_add(struct mqueue_inode_info *info, int sr,
                        struct ext_wait_queue *ewp)
{
        struct ext_wait_queue *walk;

        list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
                if (walk->task->prio <= current->prio) {
                        list_add_tail(&ewp->list, &walk->list);
                        return;
                }
        }
        list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts the current task to sleep. The caller must hold the queue lock,
 * which is released before returning.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
                    ktime_t *timeout, struct ext_wait_queue *ewp)
        __releases(&info->lock)
{
        int retval;
        signed long time;

        wq_add(info, sr, ewp);

        for (;;) {
                /* memory barrier not required, we hold info->lock */
                __set_current_state(TASK_INTERRUPTIBLE);

                spin_unlock(&info->lock);
                time = schedule_hrtimeout_range_clock(timeout, 0,
                        HRTIMER_MODE_ABS, CLOCK_REALTIME);

                if (READ_ONCE(ewp->state) == STATE_READY) {
                        /* see MQ_BARRIER for purpose/pairing */
                        smp_acquire__after_ctrl_dep();
                        retval = 0;
                        goto out;
                }
                spin_lock(&info->lock);

                /* we hold info->lock, so no memory barrier required */
                if (READ_ONCE(ewp->state) == STATE_READY) {
                        retval = 0;
                        goto out_unlock;
                }
                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                if (time == 0) {
                        retval = -ETIMEDOUT;
                        break;
                }
        }
        list_del(&ewp->list);
out_unlock:
        spin_unlock(&info->lock);
out:
        return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
                struct mqueue_inode_info *info, int sr)
{
        struct list_head *ptr;

        ptr = info->e_wait_q[sr].list.prev;
        if (ptr == &info->e_wait_q[sr].list)
                return NULL;
        return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
        ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to break up the overly long sys_mq_timedsend.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
        /* notification
         * invoked when there is a registered process, no process is waiting
         * synchronously for a message, AND the queue changed state from
         * empty to not empty. Here we are sure that no one is waiting
         * synchronously. */
        if (info->notify_owner &&
            info->attr.mq_curmsgs == 1) {
                switch (info->notify.sigev_notify) {
                case SIGEV_NONE:
                        break;
                case SIGEV_SIGNAL: {
                        struct kernel_siginfo sig_i;
                        struct task_struct *task;

                        /* do_mq_notify() accepts sigev_signo == 0, why?? */
                        if (!info->notify.sigev_signo)
                                break;

                        clear_siginfo(&sig_i);
                        sig_i.si_signo = info->notify.sigev_signo;
                        sig_i.si_errno = 0;
                        sig_i.si_code = SI_MESGQ;
                        sig_i.si_value = info->notify.sigev_value;
                        rcu_read_lock();
                        /* map current pid/uid into info->owner's namespaces */
                        sig_i.si_pid = task_tgid_nr_ns(current,
                                                ns_of_pid(info->notify_owner));
                        sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
                                                current_uid());
                        /*
                         * We can't use kill_pid_info(), this signal should
                         * bypass check_kill_permission(). It is from kernel
                         * but si_fromuser() can't know this.
                         * We do check the self_exec_id, to avoid sending
                         * signals to programs that don't expect them.
                         */
                        task = pid_task(info->notify_owner, PIDTYPE_TGID);
                        if (task && task->self_exec_id ==
                                                info->notify_self_exec_id) {
                                do_send_sig_info(info->notify.sigev_signo,
                                                &sig_i, task, PIDTYPE_TGID);
                        }
                        rcu_read_unlock();
                        break;
                }
                case SIGEV_THREAD:
                        set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
                        netlink_sendskb(info->notify_sock, info->notify_cookie);
                        break;
                }
                /* after notification unregisters process */
                put_pid(info->notify_owner);
                put_user_ns(info->notify_user_ns);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
        }
        wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
                           struct timespec64 *ts)
{
        if (get_timespec64(ts, u_abs_timeout))
                return -EFAULT;
        if (!timespec64_valid(ts))
                return -EINVAL;
        return 0;
}
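
/*
 * A minimal user-space sketch: the timeouts validated above are absolute
 * CLOCK_REALTIME times (see the HRTIMER_MODE_ABS/CLOCK_REALTIME sleep in
 * wq_sleep()), so a "5 seconds from now" deadline is built as:
 *
 *      struct timespec abs_timeout;
 *      clock_gettime(CLOCK_REALTIME, &abs_timeout);
 *      abs_timeout.tv_sec += 5;
 */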

static void remove_notification(struct mqueue_inode_info *info)
{
        if (info->notify_owner != NULL &&
            info->notify.sigev_notify == SIGEV_THREAD) {
                set_cookie(info->notify_cookie, NOTIFY_REMOVED);
                netlink_sendskb(info->notify_sock, info->notify_cookie);
        }
        put_pid(info->notify_owner);
        put_user_ns(info->notify_user_ns);
        info->notify_owner = NULL;
        info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
                        umode_t mode, struct filename *name,
                        struct mq_attr *attr)
{
        static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
                                                  MAY_READ | MAY_WRITE };
        int acc;

        if (d_really_is_negative(dentry)) {
                if (!(oflag & O_CREAT))
                        return -ENOENT;
                if (ro)
                        return ro;
                audit_inode_parent_hidden(name, dentry->d_parent);
                return vfs_mkobj(dentry, mode & ~current_umask(),
                                  mqueue_create_attr, attr);
        }
        /* it already existed */
        audit_inode(name, dentry, 0);
        if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
                return -EEXIST;
        if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
                return -EINVAL;
        acc = oflag2acc[oflag & O_ACCMODE];
        return inode_permission(&init_user_ns, d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
                      struct mq_attr *attr)
{
        struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
        struct dentry *root = mnt->mnt_root;
        struct filename *name;
        struct path path;
        int fd, error;
        int ro;

        audit_mq_open(oflag, mode, attr);

        if (IS_ERR(name = getname(u_name)))
                return PTR_ERR(name);

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                goto out_putname;

        ro = mnt_want_write(mnt);       /* we'll drop it in any case */
        inode_lock(d_inode(root));
        path.dentry = lookup_one_len(name->name, root, strlen(name->name));
        if (IS_ERR(path.dentry)) {
                error = PTR_ERR(path.dentry);
                goto out_putfd;
        }
        path.mnt = mntget(mnt);
        error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
        if (!error) {
                struct file *file = dentry_open(&path, oflag, current_cred());
                if (!IS_ERR(file))
                        fd_install(fd, file);
                else
                        error = PTR_ERR(file);
        }
        path_put(&path);
out_putfd:
        if (error) {
                put_unused_fd(fd);
                fd = error;
        }
        inode_unlock(d_inode(root));
        if (!ro)
                mnt_drop_write(mnt);
out_putname:
        putname(name);
        return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
                struct mq_attr __user *, u_attr)
{
        struct mq_attr attr;
        if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
                return -EFAULT;

        return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}
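
/*
 * A minimal user-space sketch (assuming the glibc mq_open(3) wrapper and a
 * hypothetical queue name): creating a queue with explicit attributes ends
 * up in do_mq_open() above:
 *
 *      struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *      mqd_t mqd = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, &attr);
 *      if (mqd == (mqd_t)-1)
 *              perror("mq_open");
 */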

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
        int err;
        struct filename *name;
        struct dentry *dentry;
        struct inode *inode = NULL;
        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
        struct vfsmount *mnt = ipc_ns->mq_mnt;

        name = getname(u_name);
        if (IS_ERR(name))
                return PTR_ERR(name);

        audit_inode_parent_hidden(name, mnt->mnt_root);
        err = mnt_want_write(mnt);
        if (err)
                goto out_name;
        inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
        dentry = lookup_one_len(name->name, mnt->mnt_root,
                                strlen(name->name));
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                goto out_unlock;
        }

        inode = d_inode(dentry);
        if (!inode) {
                err = -ENOENT;
        } else {
                ihold(inode);
                err = vfs_unlink(&init_user_ns, d_inode(dentry->d_parent),
                                 dentry, NULL);
        }
        dput(dentry);

out_unlock:
        inode_unlock(d_inode(mnt->mnt_root));
        if (inode)
                iput(inode);
        mnt_drop_write(mnt);
out_name:
        putname(name);

        return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

static inline void __pipelined_op(struct wake_q_head *wake_q,
                                  struct mqueue_inode_info *info,
                                  struct ext_wait_queue *this)
{
        struct task_struct *task;

        list_del(&this->list);
        task = get_task_struct(this->task);

        /* see MQ_BARRIER for purpose/pairing */
        smp_store_release(&this->state, STATE_READY);
        wake_q_add_safe(wake_q, task);
}

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting the message into the queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
                                  struct mqueue_inode_info *info,
                                  struct msg_msg *message,
                                  struct ext_wait_queue *receiver)
{
        receiver->msg = message;
        __pipelined_op(wake_q, info, receiver);
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (there is guaranteed to be
 * one free slot). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
                                     struct mqueue_inode_info *info)
{
        struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

        if (!sender) {
                /* for poll */
                wake_up_interruptible(&info->wait_q);
                return;
        }
        if (msg_insert(sender->msg, info))
                return;

        __pipelined_op(wake_q, info, sender);
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
                size_t msg_len, unsigned int msg_prio,
                struct timespec64 *ts)
{
        struct fd f;
        struct inode *inode;
        struct ext_wait_queue wait;
        struct ext_wait_queue *receiver;
        struct msg_msg *msg_ptr;
        struct mqueue_inode_info *info;
        ktime_t expires, *timeout = NULL;
        struct posix_msg_tree_node *new_leaf = NULL;
        int ret = 0;
        DEFINE_WAKE_Q(wake_q);

        if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
                return -EINVAL;

        if (ts) {
                expires = timespec64_to_ktime(*ts);
                timeout = &expires;
        }

        audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

        f = fdget(mqdes);
        if (unlikely(!f.file)) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
        audit_file(f.file);

        if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
                ret = -EBADF;
                goto out_fput;
        }

        if (unlikely(msg_len > info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /* First try to allocate memory, before doing anything with
         * existing queues. */
        msg_ptr = load_msg(u_msg_ptr, msg_len);
        if (IS_ERR(msg_ptr)) {
                ret = PTR_ERR(msg_ptr);
                goto out_fput;
        }
        msg_ptr->m_ts = msg_len;
        msg_ptr->m_type = msg_prio;

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
                new_leaf = NULL;
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
                if (f.file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
                        wait.msg = (void *) msg_ptr;

                        /* memory barrier not required, we hold info->lock */
                        WRITE_ONCE(wait.state, STATE_NONE);
                        ret = wq_sleep(info, SEND, timeout, &wait);
                        /*
                         * wq_sleep must be called with info->lock held, and
                         * returns with the lock released
                         */
                        goto out_free;
                }
        } else {
                receiver = wq_get_first_waiter(info, RECV);
                if (receiver) {
                        pipelined_send(&wake_q, info, msg_ptr, receiver);
                } else {
                        /* adds message to the queue */
                        ret = msg_insert(msg_ptr, info);
                        if (ret)
                                goto out_unlock;
                        __do_notify(info);
                }
                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                current_time(inode);
        }
out_unlock:
        spin_unlock(&info->lock);
        wake_up_q(&wake_q);
out_free:
        if (ret)
                free_msg(msg_ptr);
out_fput:
        fdput(f);
out:
        return ret;
}

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
                size_t msg_len, unsigned int __user *u_msg_prio,
                struct timespec64 *ts)
{
        ssize_t ret;
        struct msg_msg *msg_ptr;
        struct fd f;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct ext_wait_queue wait;
        ktime_t expires, *timeout = NULL;
        struct posix_msg_tree_node *new_leaf = NULL;

        if (ts) {
                expires = timespec64_to_ktime(*ts);
                timeout = &expires;
        }

        audit_mq_sendrecv(mqdes, msg_len, 0, ts);

        f = fdget(mqdes);
        if (unlikely(!f.file)) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
        audit_file(f.file);

        if (unlikely(!(f.file->f_mode & FMODE_READ))) {
                ret = -EBADF;
                goto out_fput;
        }

        /* checks if buffer is big enough */
        if (unlikely(msg_len < info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == 0) {
                if (f.file->f_flags & O_NONBLOCK) {
                        spin_unlock(&info->lock);
                        ret = -EAGAIN;
                } else {
                        wait.task = current;

                        /* memory barrier not required, we hold info->lock */
                        WRITE_ONCE(wait.state, STATE_NONE);
                        ret = wq_sleep(info, RECV, timeout, &wait);
                        msg_ptr = wait.msg;
                }
        } else {
                DEFINE_WAKE_Q(wake_q);

                msg_ptr = msg_get(info);

                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                current_time(inode);

                /* There is now free space in queue. */
                pipelined_receive(&wake_q, info);
                spin_unlock(&info->lock);
                wake_up_q(&wake_q);
                ret = 0;
        }
        if (ret == 0) {
                ret = msg_ptr->m_ts;

                if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
                        store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
                        ret = -EFAULT;
                }
                free_msg(msg_ptr);
        }
out_fput:
        fdput(f);
out:
        return ret;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int, msg_prio,
                const struct __kernel_timespec __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int __user *, u_msg_prio,
                const struct __kernel_timespec __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
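
/*
 * A minimal user-space sketch (glibc wrappers assumed; mqd and abs_timeout
 * as in the earlier sketches): a blocking send/receive pair.  Note that
 * the receive buffer must be at least mq_msgsize bytes, or the call fails
 * with EMSGSIZE (see do_mq_timedreceive() above):
 *
 *      char buf[128];          (at least attr.mq_msgsize bytes)
 *      unsigned int prio;
 *      mq_timedsend(mqd, "hello", 6, 1, &abs_timeout);
 *      ssize_t n = mq_timedreceive(mqd, buf, sizeof(buf), &prio,
 *                                  &abs_timeout);
 */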

/*
 * Notes: if the user asks us to deregister (by passing a NULL pointer)
 * but isn't currently the owner of the notification, the request is
 * silently discarded. This case isn't explicitly defined by POSIX.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
        int ret;
        struct fd f;
        struct sock *sock;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct sk_buff *nc;

        audit_mq_notify(mqdes, notification);

        nc = NULL;
        sock = NULL;
        if (notification != NULL) {
                if (unlikely(notification->sigev_notify != SIGEV_NONE &&
                             notification->sigev_notify != SIGEV_SIGNAL &&
                             notification->sigev_notify != SIGEV_THREAD))
                        return -EINVAL;
                if (notification->sigev_notify == SIGEV_SIGNAL &&
                        !valid_signal(notification->sigev_signo)) {
                        return -EINVAL;
                }
                if (notification->sigev_notify == SIGEV_THREAD) {
                        long timeo;

                        /* create the notify skb */
                        nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
                        if (!nc)
                                return -ENOMEM;

                        if (copy_from_user(nc->data,
                                        notification->sigev_value.sival_ptr,
                                        NOTIFY_COOKIE_LEN)) {
                                ret = -EFAULT;
                                goto free_skb;
                        }

                        /* TODO: add a header? */
                        skb_put(nc, NOTIFY_COOKIE_LEN);
                        /* and attach it to the socket */
retry:
                        f = fdget(notification->sigev_signo);
                        if (!f.file) {
                                ret = -EBADF;
                                goto out;
                        }
                        sock = netlink_getsockbyfilp(f.file);
                        fdput(f);
                        if (IS_ERR(sock)) {
                                ret = PTR_ERR(sock);
                                goto free_skb;
                        }

                        timeo = MAX_SCHEDULE_TIMEOUT;
                        ret = netlink_attachskb(sock, nc, &timeo, NULL);
                        if (ret == 1) {
                                sock = NULL;
                                goto retry;
                        }
                        if (ret)
                                return ret;
                }
        }

        f = fdget(mqdes);
        if (!f.file) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);

        ret = 0;
        spin_lock(&info->lock);
        if (notification == NULL) {
                if (info->notify_owner == task_tgid(current)) {
                        remove_notification(info);
                        inode->i_atime = inode->i_ctime = current_time(inode);
                }
        } else if (info->notify_owner != NULL) {
                ret = -EBUSY;
        } else {
                switch (notification->sigev_notify) {
                case SIGEV_NONE:
                        info->notify.sigev_notify = SIGEV_NONE;
                        break;
                case SIGEV_THREAD:
                        info->notify_sock = sock;
                        info->notify_cookie = nc;
                        sock = NULL;
                        nc = NULL;
                        info->notify.sigev_notify = SIGEV_THREAD;
                        break;
                case SIGEV_SIGNAL:
                        info->notify.sigev_signo = notification->sigev_signo;
                        info->notify.sigev_value = notification->sigev_value;
                        info->notify.sigev_notify = SIGEV_SIGNAL;
                        info->notify_self_exec_id = current->self_exec_id;
                        break;
                }

                info->notify_owner = get_pid(task_tgid(current));
                info->notify_user_ns = get_user_ns(current_user_ns());
                inode->i_atime = inode->i_ctime = current_time(inode);
        }
        spin_unlock(&info->lock);
out_fput:
        fdput(f);
out:
        if (sock)
                netlink_detachskb(sock, nc);
        else
free_skb:
                dev_kfree_skb(nc);

        return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                const struct sigevent __user *, u_notification)
{
        struct sigevent n, *p = NULL;
        if (u_notification) {
                if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
                        return -EFAULT;
                p = &n;
        }
        return do_mq_notify(mqdes, p);
}
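
/*
 * A minimal user-space sketch (glibc mq_notify(3) wrapper assumed):
 * registering for SIGEV_SIGNAL delivery, which lands in do_mq_notify()
 * above.  The signal fires when the queue goes from empty to non-empty
 * and no reader is blocked in mq_receive() (see __do_notify()):
 *
 *      struct sigevent sev = {
 *              .sigev_notify = SIGEV_SIGNAL,
 *              .sigev_signo  = SIGUSR1,
 *      };
 *      if (mq_notify(mqd, &sev) == -1)
 *              perror("mq_notify");
 */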
1452
1453static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
1454{
1455        struct fd f;
1456        struct inode *inode;
1457        struct mqueue_inode_info *info;
1458
1459        if (new && (new->mq_flags & (~O_NONBLOCK)))
1460                return -EINVAL;
1461
1462        f = fdget(mqdes);
1463        if (!f.file)
1464                return -EBADF;
1465
1466        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1467                fdput(f);
1468                return -EBADF;
1469        }
1470
1471        inode = file_inode(f.file);
1472        info = MQUEUE_I(inode);
1473
1474        spin_lock(&info->lock);
1475
1476        if (old) {
1477                *old = info->attr;
1478                old->mq_flags = f.file->f_flags & O_NONBLOCK;
1479        }
1480        if (new) {
1481                audit_mq_getsetattr(mqdes, new);
1482                spin_lock(&f.file->f_lock);
1483                if (new->mq_flags & O_NONBLOCK)
1484                        f.file->f_flags |= O_NONBLOCK;
1485                else
1486                        f.file->f_flags &= ~O_NONBLOCK;
1487                spin_unlock(&f.file->f_lock);
1488
1489                inode->i_atime = inode->i_ctime = current_time(inode);
1490        }
1491
1492        spin_unlock(&info->lock);
1493        fdput(f);
1494        return 0;
1495}
1496
1497SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1498                const struct mq_attr __user *, u_mqstat,
1499                struct mq_attr __user *, u_omqstat)
1500{
1501        int ret;
1502        struct mq_attr mqstat, omqstat;
1503        struct mq_attr *new = NULL, *old = NULL;
1504
1505        if (u_mqstat) {
1506                new = &mqstat;
1507                if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
1508                        return -EFAULT;
1509        }
1510        if (u_omqstat)
1511                old = &omqstat;
1512
1513        ret = do_mq_getsetattr(mqdes, new, old);
1514        if (ret || !old)
1515                return ret;
1516
1517        if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
1518                return -EFAULT;
1519        return 0;
1520}
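/*
 * Illustrative userspace sketch (not kernel code): glibc's mq_getattr()
 * and mq_setattr() wrappers both land in this syscall. Only O_NONBLOCK
 * in mq_flags is mutable, as enforced above; mqd is assumed to come
 * from mq_open() and error handling is elided.
 *
 *	struct mq_attr attr;
 *
 *	mq_getattr(mqd, &attr);		// limits and current depth
 *	attr.mq_flags |= O_NONBLOCK;	// the only mutable flag
 *	mq_setattr(mqd, &attr, NULL);	// EINVAL for any other bit
 */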
1521
1522#ifdef CONFIG_COMPAT
1523
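/*
 * A 32-bit task lays out mq_attr with 32-bit longs, so on a 64-bit
 * kernel the structure cannot be copied as-is; the helpers below
 * widen the fields on input and narrow them on output.
 */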
1524struct compat_mq_attr {
1525        compat_long_t mq_flags;      /* message queue flags                  */
1526        compat_long_t mq_maxmsg;     /* maximum number of messages           */
1527        compat_long_t mq_msgsize;    /* maximum message size                 */
1528        compat_long_t mq_curmsgs;    /* number of messages currently queued  */
1529        compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
1530};
1531
1532static inline int get_compat_mq_attr(struct mq_attr *attr,
1533                        const struct compat_mq_attr __user *uattr)
1534{
1535        struct compat_mq_attr v;
1536
1537        if (copy_from_user(&v, uattr, sizeof(*uattr)))
1538                return -EFAULT;
1539
1540        memset(attr, 0, sizeof(*attr));
1541        attr->mq_flags = v.mq_flags;
1542        attr->mq_maxmsg = v.mq_maxmsg;
1543        attr->mq_msgsize = v.mq_msgsize;
1544        attr->mq_curmsgs = v.mq_curmsgs;
1545        return 0;
1546}
1547
1548static inline int put_compat_mq_attr(const struct mq_attr *attr,
1549                        struct compat_mq_attr __user *uattr)
1550{
1551        struct compat_mq_attr v;
1552
1553        memset(&v, 0, sizeof(v));
1554        v.mq_flags = attr->mq_flags;
1555        v.mq_maxmsg = attr->mq_maxmsg;
1556        v.mq_msgsize = attr->mq_msgsize;
1557        v.mq_curmsgs = attr->mq_curmsgs;
1558        if (copy_to_user(uattr, &v, sizeof(*uattr)))
1559                return -EFAULT;
1560        return 0;
1561}
1562
1563COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
1564                       int, oflag, compat_mode_t, mode,
1565                       struct compat_mq_attr __user *, u_attr)
1566{
1567        struct mq_attr attr, *p = NULL;
1568        if (u_attr && oflag & O_CREAT) {
1569                p = &attr;
1570                if (get_compat_mq_attr(&attr, u_attr))
1571                        return -EFAULT;
1572        }
1573        return do_mq_open(u_name, oflag, mode, p);
1574}
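/* As in the native mq_open(), u_attr is consulted only when O_CREAT is
 * set, so a bad attribute pointer is harmless for a plain open. */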
1575
1576COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1577                       const struct compat_sigevent __user *, u_notification)
1578{
1579        struct sigevent n, *p = NULL;
1580        if (u_notification) {
1581                if (get_compat_sigevent(&n, u_notification))
1582                        return -EFAULT;
1583                if (n.sigev_notify == SIGEV_THREAD)
1584                        n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
1585                p = &n;
1586        }
1587        return do_mq_notify(mqdes, p);
1588}
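/*
 * The sival_ptr fixup above is needed because get_compat_sigevent()
 * copies the 32-bit task's sigev_value as an int; for SIGEV_THREAD,
 * where the value carries the notification cookie pointer, the kernel
 * pointer is reconstructed with compat_ptr() before handing off.
 */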
1589
1590COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1591                       const struct compat_mq_attr __user *, u_mqstat,
1592                       struct compat_mq_attr __user *, u_omqstat)
1593{
1594        int ret;
1595        struct mq_attr mqstat, omqstat;
1596        struct mq_attr *new = NULL, *old = NULL;
1597
1598        if (u_mqstat) {
1599                new = &mqstat;
1600                if (get_compat_mq_attr(new, u_mqstat))
1601                        return -EFAULT;
1602        }
1603        if (u_omqstat)
1604                old = &omqstat;
1605
1606        ret = do_mq_getsetattr(mqdes, new, old);
1607        if (ret || !old)
1608                return ret;
1609
1610        if (put_compat_mq_attr(old, u_omqstat))
1611                return -EFAULT;
1612        return 0;
1613}
1614#endif
1615
1616#ifdef CONFIG_COMPAT_32BIT_TIME
1617static int compat_prepare_timeout(const struct old_timespec32 __user *p,
1618                                   struct timespec64 *ts)
1619{
1620        if (get_old_timespec32(ts, p))
1621                return -EFAULT;
1622        if (!timespec64_valid(ts))
1623                return -EINVAL;
1624        return 0;
1625}
1626
1627SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
1628                const char __user *, u_msg_ptr,
1629                unsigned int, msg_len, unsigned int, msg_prio,
1630                const struct old_timespec32 __user *, u_abs_timeout)
1631{
1632        struct timespec64 ts, *p = NULL;
1633        if (u_abs_timeout) {
1634                int res = compat_prepare_timeout(u_abs_timeout, &ts);
1635                if (res)
1636                        return res;
1637                p = &ts;
1638        }
1639        return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
1640}
1641
1642SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
1643                char __user *, u_msg_ptr,
1644                unsigned int, msg_len, unsigned int __user *, u_msg_prio,
1645                const struct old_timespec32 __user *, u_abs_timeout)
1646{
1647        struct timespec64 ts, *p = NULL;
1648        if (u_abs_timeout) {
1649                int res = compat_prepare_timeout(u_abs_timeout, &ts);
1650                if (res)
1651                        return res;
1652                p = &ts;
1653        }
1654        return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
1655}
1656#endif
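/*
 * Illustrative userspace sketch (not kernel code; assumes <mqueue.h>,
 * <time.h>, <errno.h>, <stdio.h> and an open descriptor mqd): the
 * timeout is an absolute CLOCK_REALTIME deadline, not a relative
 * interval, and the receive buffer must be at least mq_msgsize bytes
 * or the call fails with EMSGSIZE.
 *
 *	struct mq_attr attr;
 *	struct timespec deadline;
 *	unsigned int prio;
 *	char buf[8192];			// assume >= attr.mq_msgsize
 *
 *	mq_getattr(mqd, &attr);
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;		// give up after ~5 seconds
 *	if (mq_timedreceive(mqd, buf, sizeof(buf), &prio, &deadline) == -1 &&
 *	    errno == ETIMEDOUT)
 *		fprintf(stderr, "no message within the deadline\n");
 */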
1657
1658static const struct inode_operations mqueue_dir_inode_operations = {
1659        .lookup = simple_lookup,
1660        .create = mqueue_create,
1661        .unlink = mqueue_unlink,
1662};
1663
1664static const struct file_operations mqueue_file_operations = {
1665        .flush = mqueue_flush_file,
1666        .poll = mqueue_poll_file,
1667        .read = mqueue_read_file,
1668        .llseek = default_llseek,
1669};
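/*
 * Since an mqd_t is an ordinary file descriptor on Linux, .poll makes
 * queues usable with select()/poll()/epoll and .read lets a plain
 * "cat" on the queue file report its status; both are Linux-specific
 * extensions to POSIX.
 */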
1670
1671static const struct super_operations mqueue_super_ops = {
1672        .alloc_inode = mqueue_alloc_inode,
1673        .free_inode = mqueue_free_inode,
1674        .evict_inode = mqueue_evict_inode,
1675        .statfs = simple_statfs,
1676};
1677
1678static const struct fs_context_operations mqueue_fs_context_ops = {
1679        .free           = mqueue_fs_context_free,
1680        .get_tree       = mqueue_get_tree,
1681};
1682
1683static struct file_system_type mqueue_fs_type = {
1684        .name                   = "mqueue",
1685        .init_fs_context        = mqueue_init_fs_context,
1686        .kill_sb                = kill_litter_super,
1687        .fs_flags               = FS_USERNS_MOUNT,
1688};
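/*
 * Illustrative shell session (queue name made up): the filesystem
 * registered above is conventionally mounted at /dev/mqueue, where
 * each queue appears as a file whose contents show its status:
 *
 *	# mount -t mqueue none /dev/mqueue
 *	# cat /dev/mqueue/demo
 *	QSIZE:129    NOTIFY:2    SIGNO:0    NOTIFY_PID:8260
 */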
1689
1690int mq_init_ns(struct ipc_namespace *ns)
1691{
1692        struct vfsmount *m;
1693
1694        ns->mq_queues_count  = 0;
1695        ns->mq_queues_max    = DFLT_QUEUESMAX;
1696        ns->mq_msg_max       = DFLT_MSGMAX;
1697        ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
1698        ns->mq_msg_default   = DFLT_MSG;
1699        ns->mq_msgsize_default  = DFLT_MSGSIZE;
1700
1701        m = mq_create_mount(ns);
1702        if (IS_ERR(m))
1703                return PTR_ERR(m);
1704        ns->mq_mnt = m;
1705        return 0;
1706}
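/*
 * The per-namespace defaults installed above are exposed as sysctls,
 * e.g. (illustrative shell session):
 *
 *	$ sysctl fs.mqueue.msg_max fs.mqueue.msgsize_max fs.mqueue.queues_max
 */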
1707
1708void mq_clear_sbinfo(struct ipc_namespace *ns)
1709{
1710        ns->mq_mnt->mnt_sb->s_fs_info = NULL;
1711}
1712
1713void mq_put_mnt(struct ipc_namespace *ns)
1714{
1715        kern_unmount(ns->mq_mnt);
1716}
1717
1718static int __init init_mqueue_fs(void)
1719{
1720        int error;
1721
1722        mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
1723                                sizeof(struct mqueue_inode_info), 0,
1724                                SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
1725        if (mqueue_inode_cachep == NULL)
1726                return -ENOMEM;
1727
1728        if (!setup_mq_sysctls(&init_ipc_ns)) {
1729                pr_warn("sysctl registration failed\n");
1730                kmem_cache_destroy(mqueue_inode_cachep);
1731                return -ENOMEM;
1732        }
1732
1733        error = register_filesystem(&mqueue_fs_type);
1734        if (error)
1735                goto out_sysctl;
1736
1737        spin_lock_init(&mq_lock);
1738
1739        error = mq_init_ns(&init_ipc_ns);
1740        if (error)
1741                goto out_filesystem;
1742
1743        return 0;
1744
1745out_filesystem:
1746        unregister_filesystem(&mqueue_fs_type);
1747out_sysctl:
1748        kmem_cache_destroy(mqueue_inode_cachep);
1749        retire_mq_sysctls(&init_ipc_ns);
1750        return error;
1751}
1752
1753device_initcall(init_mqueue_fs);
1754