linux/ipc/sem.c
   1/*
   2 * linux/ipc/sem.c
   3 * Copyright (C) 1992 Krishna Balasubramanian
   4 * Copyright (C) 1995 Eric Schenk, Bruno Haible
   5 *
   6 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
   7 * This code underwent a massive rewrite in order to solve some problems
   8 * with the original code. In particular the original code failed to
   9 * wake up processes that were waiting for semval to go to 0 if the
  10 * value went to 0 and was then incremented rapidly enough. In solving
  11 * this problem I have also modified the implementation so that it
   12 * processes pending operations in a FIFO manner, thus giving a guarantee
  13 * that processes waiting for a lock on the semaphore won't starve
  14 * unless another locking process fails to unlock.
  15 * In addition the following two changes in behavior have been introduced:
   16 * - The original implementation of semop returned the value of the
   17 *   last semaphore element examined on success. This does not
  18 *   match the manual page specifications, and effectively
  19 *   allows the user to read the semaphore even if they do not
  20 *   have read permissions. The implementation now returns 0
  21 *   on success as stated in the manual page.
  22 * - There is some confusion over whether the set of undo adjustments
  23 *   to be performed at exit should be done in an atomic manner.
  24 *   That is, if we are attempting to decrement the semval should we queue
  25 *   up and wait until we can do so legally?
  26 *   The original implementation attempted to do this.
  27 *   The current implementation does not do so. This is because I don't
  28 *   think it is the right thing (TM) to do, and because I couldn't
  29 *   see a clean way to get the old behavior with the new design.
  30 *   The POSIX standard and SVID should be consulted to determine
  31 *   what behavior is mandated.
  32 *
  33 * Further notes on refinement (Christoph Rohland, December 1998):
   34 * - The POSIX standard says that the undo adjustments should simply be
   35 *   applied. So the current implementation is OK.
  36 * - The previous code had two flaws:
  37 *   1) It actively gave the semaphore to the next waiting process
   38 *      sleeping on the semaphore. Since this process did not have the
   39 *      CPU, this led to many unnecessary context switches and bad
  40 *      performance. Now we only check which process should be able to
  41 *      get the semaphore and if this process wants to reduce some
  42 *      semaphore value we simply wake it up without doing the
  43 *      operation. So it has to try to get it later. Thus e.g. the
  44 *      running process may reacquire the semaphore during the current
  45 *      time slice. If it only waits for zero or increases the semaphore,
  46 *      we do the operation in advance and wake it up.
   47 *   2) It did not wake up all processes waiting for zero. We try to do
   48 *      better, but only handle correctly those semops that wait for zero
   49 *      or increase. If there are decrement operations in the operations
  50 *      array we do the same as before.
  51 *
   52 * With the introduction of the O(1) scheduler, it became unnecessary to
   53 * perform the check/retry algorithm for waking up blocked processes, as
   54 * the new scheduler handles thread switches better than the old one.
  55 *
  56 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
  57 *
  58 * SMP-threaded, sysctl's added
  59 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
  60 * Enforced range limit on SEM_UNDO
  61 * (c) 2001 Red Hat Inc <alan@redhat.com>
  62 * Lockless wakeup
  63 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
  64 *
  65 * support for audit of ipc object properties and permission changes
  66 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  67 *
  68 * namespaces support
  69 * OpenVZ, SWsoft Inc.
  70 * Pavel Emelianov <xemul@openvz.org>
  71 */
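/*
 * Illustrative userspace sketch (added here for exposition; not part of
 * the original file): the semantics discussed above as seen from user
 * land - semop() returns 0 on success rather than a semaphore value, and
 * operations done with SEM_UNDO are rolled back automatically if the
 * process exits without reversing them. Error handling is omitted and
 * "union semun" is assumed to be defined by the caller, as semctl(2)
 * requires on Linux.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	union semun { int val; };
 *
 *	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *	union semun arg = { .val = 1 };
 *	semctl(id, 0, SETVAL, arg);		(semval = 1: unlocked)
 *
 *	struct sembuf lock   = { 0, -1, SEM_UNDO };
 *	struct sembuf unlock = { 0, +1, SEM_UNDO };
 *	semop(id, &lock, 1);			(returns 0, not the semval)
 *	... critical section ...
 *	semop(id, &unlock, 1);			(semadj back to 0)
 *	semctl(id, 0, IPC_RMID, arg);
 */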
  72
  73#include <linux/slab.h>
  74#include <linux/spinlock.h>
  75#include <linux/init.h>
  76#include <linux/proc_fs.h>
  77#include <linux/time.h>
  78#include <linux/security.h>
  79#include <linux/syscalls.h>
  80#include <linux/audit.h>
  81#include <linux/capability.h>
  82#include <linux/seq_file.h>
  83#include <linux/rwsem.h>
  84#include <linux/nsproxy.h>
  85#include <linux/ipc_namespace.h>
  86
  87#include <asm/uaccess.h>
  88#include "util.h"
  89
  90#define sem_ids(ns)     ((ns)->ids[IPC_SEM_IDS])
  91
  92#define sem_unlock(sma)         ipc_unlock(&(sma)->sem_perm)
  93#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
  94
  95static int newary(struct ipc_namespace *, struct ipc_params *);
  96static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
  97#ifdef CONFIG_PROC_FS
  98static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
  99#endif
 100
 101#define SEMMSL_FAST     256 /* 512 bytes on stack */
 102#define SEMOPM_FAST     64  /* ~ 372 bytes on stack */
 103
 104/*
 105 * linked list protection:
 106 *      sem_undo.id_next,
 107 *      sem_array.sem_pending{,last},
 108 *      sem_array.sem_undo: sem_lock() for read/write
 109 *      sem_undo.proc_next: only "current" is allowed to read/write that field.
 110 *      
 111 */
 112
 113#define sc_semmsl       sem_ctls[0]
 114#define sc_semmns       sem_ctls[1]
 115#define sc_semopm       sem_ctls[2]
 116#define sc_semmni       sem_ctls[3]
 117
 118void sem_init_ns(struct ipc_namespace *ns)
 119{
 120        ns->sc_semmsl = SEMMSL;
 121        ns->sc_semmns = SEMMNS;
 122        ns->sc_semopm = SEMOPM;
 123        ns->sc_semmni = SEMMNI;
 124        ns->used_sems = 0;
 125        ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
 126}
 127
 128#ifdef CONFIG_IPC_NS
 129void sem_exit_ns(struct ipc_namespace *ns)
 130{
 131        free_ipcs(ns, &sem_ids(ns), freeary);
 132        idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 133}
 134#endif
 135
 136void __init sem_init (void)
 137{
 138        sem_init_ns(&init_ipc_ns);
 139        ipc_init_proc_interface("sysvipc/sem",
 140                                "       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
 141                                IPC_SEM_IDS, sysvipc_sem_proc_show);
 142}
 143
 144/*
 145 * sem_lock_(check_) routines are called in the paths where the rw_mutex
 146 * is not held.
 147 */
 148static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
 149{
 150        struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);
 151
 152        if (IS_ERR(ipcp))
 153                return (struct sem_array *)ipcp;
 154
 155        return container_of(ipcp, struct sem_array, sem_perm);
 156}
 157
 158static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
 159                                                int id)
 160{
 161        struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
 162
 163        if (IS_ERR(ipcp))
 164                return (struct sem_array *)ipcp;
 165
 166        return container_of(ipcp, struct sem_array, sem_perm);
 167}
 168
 169static inline void sem_lock_and_putref(struct sem_array *sma)
 170{
 171        ipc_lock_by_ptr(&sma->sem_perm);
 172        ipc_rcu_putref(sma);
 173}
 174
 175static inline void sem_getref_and_unlock(struct sem_array *sma)
 176{
 177        ipc_rcu_getref(sma);
 178        ipc_unlock(&(sma)->sem_perm);
 179}
 180
 181static inline void sem_putref(struct sem_array *sma)
 182{
 183        ipc_lock_by_ptr(&sma->sem_perm);
 184        ipc_rcu_putref(sma);
 185        ipc_unlock(&(sma)->sem_perm);
 186}
 187
 188static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 189{
 190        ipc_rmid(&sem_ids(ns), &s->sem_perm);
 191}
 192
 193/*
 194 * Lockless wakeup algorithm:
 195 * Without the check/retry algorithm a lockless wakeup is possible:
 196 * - queue.status is initialized to -EINTR before blocking.
 197 * - wakeup is performed by
 198 *      * unlinking the queue entry from sma->sem_pending
 199 *      * setting queue.status to IN_WAKEUP
 200 *        This is the notification for the blocked thread that a
 201 *        result value is imminent.
 202 *      * call wake_up_process
 203 *      * set queue.status to the final value.
 204 * - the previously blocked thread checks queue.status:
 205 *      * if it's IN_WAKEUP, then it must wait until the value changes
 206 *      * if it's not -EINTR, then the operation was completed by
 207 *        update_queue. semtimedop can return queue.status without
 208 *        performing any operation on the sem array.
 209 *      * otherwise it must acquire the spinlock and check what's up.
 210 *
 211 * The two-stage algorithm is necessary to protect against the following
 212 * races:
 213 * - if queue.status is set after wake_up_process, then the woken up idle
 214 *   thread could race forward and try (and fail) to acquire sma->lock
 215 *   before update_queue had a chance to set queue.status
 216 * - if queue.status is written before wake_up_process and if the
 217 *   blocked process is woken up by a signal between writing
 218 *   queue.status and the wake_up_process, then the woken up
 219 *   process could return from semtimedop and die by calling
 220 *   sys_exit before wake_up_process is called. Then wake_up_process
 221 *   will oops, because the task structure is already invalid.
 222 *   (yes, this happened on s390 with sysv msg).
 223 *
 224 */
 225#define IN_WAKEUP       1
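
/*
 * Exposition-only sketch (not in the original source; the helper name is
 * invented here): the waker's half of the handshake described above,
 * assuming the caller holds the semaphore set's spinlock. The same
 * sequence appears open-coded in update_queue() and freeary() below; the
 * sleeper's half - spinning on IN_WAKEUP and re-reading queue.status -
 * lives in sys_semtimedop().
 */
static inline void wake_up_sem_queue_sketch(struct sem_queue *q, int error)
{
	list_del(&q->list);		/* unlink from sma->sem_pending */
	q->status = IN_WAKEUP;		/* tell the sleeper a result is imminent */
	wake_up_process(q->sleeper);
	smp_wmb();			/* order the above before the final status */
	q->status = error;		/* final value; q may be freed from here on */
}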
 226
 227/**
 228 * newary - Create a new semaphore set
 229 * @ns: namespace
 230 * @params: ptr to the structure that contains key, semflg and nsems
 231 *
 232 * Called with sem_ids.rw_mutex held (as a writer)
 233 */
 234
 235static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 236{
 237        int id;
 238        int retval;
 239        struct sem_array *sma;
 240        int size;
 241        key_t key = params->key;
 242        int nsems = params->u.nsems;
 243        int semflg = params->flg;
 244
 245        if (!nsems)
 246                return -EINVAL;
 247        if (ns->used_sems + nsems > ns->sc_semmns)
 248                return -ENOSPC;
 249
 250        size = sizeof (*sma) + nsems * sizeof (struct sem);
 251        sma = ipc_rcu_alloc(size);
 252        if (!sma) {
 253                return -ENOMEM;
 254        }
 255        memset (sma, 0, size);
 256
 257        sma->sem_perm.mode = (semflg & S_IRWXUGO);
 258        sma->sem_perm.key = key;
 259
 260        sma->sem_perm.security = NULL;
 261        retval = security_sem_alloc(sma);
 262        if (retval) {
 263                ipc_rcu_putref(sma);
 264                return retval;
 265        }
 266
 267        id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
 268        if (id < 0) {
 269                security_sem_free(sma);
 270                ipc_rcu_putref(sma);
 271                return id;
 272        }
 273        ns->used_sems += nsems;
 274
 275        sma->sem_base = (struct sem *) &sma[1];
 276        INIT_LIST_HEAD(&sma->sem_pending);
 277        INIT_LIST_HEAD(&sma->list_id);
 278        sma->sem_nsems = nsems;
 279        sma->sem_ctime = get_seconds();
 280        sem_unlock(sma);
 281
 282        return sma->sem_perm.id;
 283}
 284
 285
 286/*
 287 * Called with sem_ids.rw_mutex and ipcp locked.
 288 */
 289static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
 290{
 291        struct sem_array *sma;
 292
 293        sma = container_of(ipcp, struct sem_array, sem_perm);
 294        return security_sem_associate(sma, semflg);
 295}
 296
 297/*
 298 * Called with sem_ids.rw_mutex and ipcp locked.
 299 */
 300static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
 301                                struct ipc_params *params)
 302{
 303        struct sem_array *sma;
 304
 305        sma = container_of(ipcp, struct sem_array, sem_perm);
 306        if (params->u.nsems > sma->sem_nsems)
 307                return -EINVAL;
 308
 309        return 0;
 310}
 311
 312SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 313{
 314        struct ipc_namespace *ns;
 315        struct ipc_ops sem_ops;
 316        struct ipc_params sem_params;
 317
 318        ns = current->nsproxy->ipc_ns;
 319
 320        if (nsems < 0 || nsems > ns->sc_semmsl)
 321                return -EINVAL;
 322
 323        sem_ops.getnew = newary;
 324        sem_ops.associate = sem_security;
 325        sem_ops.more_checks = sem_more_checks;
 326
 327        sem_params.key = key;
 328        sem_params.flg = semflg;
 329        sem_params.u.nsems = nsems;
 330
 331        return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 332}
 333
 334/*
 335 * Determine whether a sequence of semaphore operations would succeed
  336 * all at once. Return 0 if yes, 1 if the caller needs to sleep, else return an error code.
 337 */
 338
 339static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
 340                             int nsops, struct sem_undo *un, int pid)
 341{
 342        int result, sem_op;
 343        struct sembuf *sop;
 344        struct sem * curr;
 345
 346        for (sop = sops; sop < sops + nsops; sop++) {
 347                curr = sma->sem_base + sop->sem_num;
 348                sem_op = sop->sem_op;
 349                result = curr->semval;
 350  
 351                if (!sem_op && result)
 352                        goto would_block;
 353
 354                result += sem_op;
 355                if (result < 0)
 356                        goto would_block;
 357                if (result > SEMVMX)
 358                        goto out_of_range;
 359                if (sop->sem_flg & SEM_UNDO) {
 360                        int undo = un->semadj[sop->sem_num] - sem_op;
 361                        /*
 362                         *      Exceeding the undo range is an error.
 363                         */
 364                        if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 365                                goto out_of_range;
 366                }
 367                curr->semval = result;
 368        }
 369
 370        sop--;
 371        while (sop >= sops) {
 372                sma->sem_base[sop->sem_num].sempid = pid;
 373                if (sop->sem_flg & SEM_UNDO)
 374                        un->semadj[sop->sem_num] -= sop->sem_op;
 375                sop--;
 376        }
 377        
 378        sma->sem_otime = get_seconds();
 379        return 0;
 380
 381out_of_range:
 382        result = -ERANGE;
 383        goto undo;
 384
 385would_block:
 386        if (sop->sem_flg & IPC_NOWAIT)
 387                result = -EAGAIN;
 388        else
 389                result = 1;
 390
 391undo:
 392        sop--;
 393        while (sop >= sops) {
 394                sma->sem_base[sop->sem_num].semval -= sop->sem_op;
 395                sop--;
 396        }
 397
 398        return result;
 399}
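
/*
 * Exposition-only note (not in the original source) on how the callers
 * below consume the three-way result of try_atomic_semop():
 *
 *	 0	all operations were applied atomically; if any of them
 *		altered the array, update_queue() should run so newly
 *		satisfiable waiters are woken.
 *	 1	the operations would block; the caller queues itself on
 *		sma->sem_pending and sleeps (see sys_semtimedop()).
 *	<0	hard error, e.g. -EAGAIN for IPC_NOWAIT or -ERANGE when a
 *		semaphore value or undo adjustment would leave its range.
 */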
 400
  401/* Go through the pending queue for the indicated semaphore set
 402 * looking for tasks that can be completed.
 403 */
 404static void update_queue (struct sem_array * sma)
 405{
 406        int error;
 407        struct sem_queue * q;
 408
 409        q = list_entry(sma->sem_pending.next, struct sem_queue, list);
 410        while (&q->list != &sma->sem_pending) {
 411                error = try_atomic_semop(sma, q->sops, q->nsops,
 412                                         q->undo, q->pid);
 413
 414                /* Does q->sleeper still need to sleep? */
 415                if (error <= 0) {
 416                        struct sem_queue *n;
 417
 418                        /*
 419                         * Continue scanning. The next operation
 420                         * that must be checked depends on the type of the
 421                         * completed operation:
 422                         * - if the operation modified the array, then
 423                         *   restart from the head of the queue and
 424                         *   check for threads that might be waiting
 425                         *   for semaphore values to become 0.
 426                         * - if the operation didn't modify the array,
 427                         *   then just continue.
 428                         * The order of list_del() and reading ->next
 429                         * is crucial: In the former case, the list_del()
 430                         * must be done first [because we might be the
 431                         * first entry in ->sem_pending], in the latter
 432                         * case the list_del() must be done last
 433                         * [because the list is invalid after the list_del()]
 434                         */
 435                        if (q->alter) {
 436                                list_del(&q->list);
 437                                n = list_entry(sma->sem_pending.next,
 438                                                struct sem_queue, list);
 439                        } else {
 440                                n = list_entry(q->list.next, struct sem_queue,
 441                                                list);
 442                                list_del(&q->list);
 443                        }
 444
 445                        /* wake up the waiting thread */
 446                        q->status = IN_WAKEUP;
 447
 448                        wake_up_process(q->sleeper);
 449                        /* hands-off: q will disappear immediately after
 450                         * writing q->status.
 451                         */
 452                        smp_wmb();
 453                        q->status = error;
 454                        q = n;
 455                } else {
 456                        q = list_entry(q->list.next, struct sem_queue, list);
 457                }
 458        }
 459}
 460
 461/* The following counts are associated to each semaphore:
  462 *   semncnt        number of tasks waiting on semval to increase
 463 *   semzcnt        number of tasks waiting on semval being zero
 464 * This model assumes that a task waits on exactly one semaphore.
 465 * Since semaphore operations are to be performed atomically, tasks actually
 466 * wait on a whole sequence of semaphores simultaneously.
 467 * The counts we return here are a rough approximation, but still
  468 * guarantee that semncnt+semzcnt>0 if the task is on the pending queue.
 469 */
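/*
 * Exposition-only example (not in the original source): a task blocked on
 * the two-operation array { {.sem_num = 0, .sem_op = -1},
 * {.sem_num = 1, .sem_op = 0} } is counted in semncnt for semaphore 0 and
 * in semzcnt for semaphore 1, although it really waits for the
 * conjunction of both conditions - hence the "rough approximation" above.
 */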
 470static int count_semncnt (struct sem_array * sma, ushort semnum)
 471{
 472        int semncnt;
 473        struct sem_queue * q;
 474
 475        semncnt = 0;
 476        list_for_each_entry(q, &sma->sem_pending, list) {
 477                struct sembuf * sops = q->sops;
 478                int nsops = q->nsops;
 479                int i;
 480                for (i = 0; i < nsops; i++)
 481                        if (sops[i].sem_num == semnum
 482                            && (sops[i].sem_op < 0)
 483                            && !(sops[i].sem_flg & IPC_NOWAIT))
 484                                semncnt++;
 485        }
 486        return semncnt;
 487}
 488
 489static int count_semzcnt (struct sem_array * sma, ushort semnum)
 490{
 491        int semzcnt;
 492        struct sem_queue * q;
 493
 494        semzcnt = 0;
 495        list_for_each_entry(q, &sma->sem_pending, list) {
 496                struct sembuf * sops = q->sops;
 497                int nsops = q->nsops;
 498                int i;
 499                for (i = 0; i < nsops; i++)
 500                        if (sops[i].sem_num == semnum
 501                            && (sops[i].sem_op == 0)
 502                            && !(sops[i].sem_flg & IPC_NOWAIT))
 503                                semzcnt++;
 504        }
 505        return semzcnt;
 506}
 507
 508void free_un(struct rcu_head *head)
 509{
 510        struct sem_undo *un = container_of(head, struct sem_undo, rcu);
 511        kfree(un);
 512}
 513
 514/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
  515 * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex
 516 * remains locked on exit.
 517 */
 518static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 519{
 520        struct sem_undo *un, *tu;
 521        struct sem_queue *q, *tq;
 522        struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
 523
 524        /* Free the existing undo structures for this semaphore set.  */
 525        assert_spin_locked(&sma->sem_perm.lock);
 526        list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
 527                list_del(&un->list_id);
 528                spin_lock(&un->ulp->lock);
 529                un->semid = -1;
 530                list_del_rcu(&un->list_proc);
 531                spin_unlock(&un->ulp->lock);
 532                call_rcu(&un->rcu, free_un);
 533        }
 534
 535        /* Wake up all pending processes and let them fail with EIDRM. */
 536        list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
 537                list_del(&q->list);
 538
 539                q->status = IN_WAKEUP;
 540                wake_up_process(q->sleeper); /* doesn't sleep */
 541                smp_wmb();
 542                q->status = -EIDRM;     /* hands-off q */
 543        }
 544
 545        /* Remove the semaphore set from the IDR */
 546        sem_rmid(ns, sma);
 547        sem_unlock(sma);
 548
 549        ns->used_sems -= sma->sem_nsems;
 550        security_sem_free(sma);
 551        ipc_rcu_putref(sma);
 552}
 553
 554static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
 555{
 556        switch(version) {
 557        case IPC_64:
 558                return copy_to_user(buf, in, sizeof(*in));
 559        case IPC_OLD:
 560            {
 561                struct semid_ds out;
 562
 563                memset(&out, 0, sizeof(out));
 564
 565                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
 566
 567                out.sem_otime   = in->sem_otime;
 568                out.sem_ctime   = in->sem_ctime;
 569                out.sem_nsems   = in->sem_nsems;
 570
 571                return copy_to_user(buf, &out, sizeof(out));
 572            }
 573        default:
 574                return -EINVAL;
 575        }
 576}
 577
 578static int semctl_nolock(struct ipc_namespace *ns, int semid,
 579                         int cmd, int version, union semun arg)
 580{
 581        int err = -EINVAL;
 582        struct sem_array *sma;
 583
 584        switch(cmd) {
 585        case IPC_INFO:
 586        case SEM_INFO:
 587        {
 588                struct seminfo seminfo;
 589                int max_id;
 590
 591                err = security_sem_semctl(NULL, cmd);
 592                if (err)
 593                        return err;
 594                
 595                memset(&seminfo,0,sizeof(seminfo));
 596                seminfo.semmni = ns->sc_semmni;
 597                seminfo.semmns = ns->sc_semmns;
 598                seminfo.semmsl = ns->sc_semmsl;
 599                seminfo.semopm = ns->sc_semopm;
 600                seminfo.semvmx = SEMVMX;
 601                seminfo.semmnu = SEMMNU;
 602                seminfo.semmap = SEMMAP;
 603                seminfo.semume = SEMUME;
 604                down_read(&sem_ids(ns).rw_mutex);
 605                if (cmd == SEM_INFO) {
 606                        seminfo.semusz = sem_ids(ns).in_use;
 607                        seminfo.semaem = ns->used_sems;
 608                } else {
 609                        seminfo.semusz = SEMUSZ;
 610                        seminfo.semaem = SEMAEM;
 611                }
 612                max_id = ipc_get_maxid(&sem_ids(ns));
 613                up_read(&sem_ids(ns).rw_mutex);
 614                if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) 
 615                        return -EFAULT;
 616                return (max_id < 0) ? 0: max_id;
 617        }
 618        case IPC_STAT:
 619        case SEM_STAT:
 620        {
 621                struct semid64_ds tbuf;
 622                int id;
 623
 624                if (cmd == SEM_STAT) {
 625                        sma = sem_lock(ns, semid);
 626                        if (IS_ERR(sma))
 627                                return PTR_ERR(sma);
 628                        id = sma->sem_perm.id;
 629                } else {
 630                        sma = sem_lock_check(ns, semid);
 631                        if (IS_ERR(sma))
 632                                return PTR_ERR(sma);
 633                        id = 0;
 634                }
 635
 636                err = -EACCES;
 637                if (ipcperms (&sma->sem_perm, S_IRUGO))
 638                        goto out_unlock;
 639
 640                err = security_sem_semctl(sma, cmd);
 641                if (err)
 642                        goto out_unlock;
 643
 644                memset(&tbuf, 0, sizeof(tbuf));
 645
 646                kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
 647                tbuf.sem_otime  = sma->sem_otime;
 648                tbuf.sem_ctime  = sma->sem_ctime;
 649                tbuf.sem_nsems  = sma->sem_nsems;
 650                sem_unlock(sma);
 651                if (copy_semid_to_user (arg.buf, &tbuf, version))
 652                        return -EFAULT;
 653                return id;
 654        }
 655        default:
 656                return -EINVAL;
 657        }
 658        return err;
 659out_unlock:
 660        sem_unlock(sma);
 661        return err;
 662}
 663
 664static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 665                int cmd, int version, union semun arg)
 666{
 667        struct sem_array *sma;
 668        struct sem* curr;
 669        int err;
 670        ushort fast_sem_io[SEMMSL_FAST];
 671        ushort* sem_io = fast_sem_io;
 672        int nsems;
 673
 674        sma = sem_lock_check(ns, semid);
 675        if (IS_ERR(sma))
 676                return PTR_ERR(sma);
 677
 678        nsems = sma->sem_nsems;
 679
 680        err = -EACCES;
 681        if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
 682                goto out_unlock;
 683
 684        err = security_sem_semctl(sma, cmd);
 685        if (err)
 686                goto out_unlock;
 687
 688        err = -EACCES;
 689        switch (cmd) {
 690        case GETALL:
 691        {
 692                ushort __user *array = arg.array;
 693                int i;
 694
 695                if(nsems > SEMMSL_FAST) {
 696                        sem_getref_and_unlock(sma);
 697
 698                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
 699                        if(sem_io == NULL) {
 700                                sem_putref(sma);
 701                                return -ENOMEM;
 702                        }
 703
 704                        sem_lock_and_putref(sma);
 705                        if (sma->sem_perm.deleted) {
 706                                sem_unlock(sma);
 707                                err = -EIDRM;
 708                                goto out_free;
 709                        }
 710                }
 711
 712                for (i = 0; i < sma->sem_nsems; i++)
 713                        sem_io[i] = sma->sem_base[i].semval;
 714                sem_unlock(sma);
 715                err = 0;
 716                if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
 717                        err = -EFAULT;
 718                goto out_free;
 719        }
 720        case SETALL:
 721        {
 722                int i;
 723                struct sem_undo *un;
 724
 725                sem_getref_and_unlock(sma);
 726
 727                if(nsems > SEMMSL_FAST) {
 728                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
 729                        if(sem_io == NULL) {
 730                                sem_putref(sma);
 731                                return -ENOMEM;
 732                        }
 733                }
 734
 735                if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
 736                        sem_putref(sma);
 737                        err = -EFAULT;
 738                        goto out_free;
 739                }
 740
 741                for (i = 0; i < nsems; i++) {
 742                        if (sem_io[i] > SEMVMX) {
 743                                sem_putref(sma);
 744                                err = -ERANGE;
 745                                goto out_free;
 746                        }
 747                }
 748                sem_lock_and_putref(sma);
 749                if (sma->sem_perm.deleted) {
 750                        sem_unlock(sma);
 751                        err = -EIDRM;
 752                        goto out_free;
 753                }
 754
 755                for (i = 0; i < nsems; i++)
 756                        sma->sem_base[i].semval = sem_io[i];
 757
 758                assert_spin_locked(&sma->sem_perm.lock);
 759                list_for_each_entry(un, &sma->list_id, list_id) {
 760                        for (i = 0; i < nsems; i++)
 761                                un->semadj[i] = 0;
 762                }
 763                sma->sem_ctime = get_seconds();
 764                /* maybe some queued-up processes were waiting for this */
 765                update_queue(sma);
 766                err = 0;
 767                goto out_unlock;
 768        }
  769        /* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
 770        }
 771        err = -EINVAL;
 772        if(semnum < 0 || semnum >= nsems)
 773                goto out_unlock;
 774
 775        curr = &sma->sem_base[semnum];
 776
 777        switch (cmd) {
 778        case GETVAL:
 779                err = curr->semval;
 780                goto out_unlock;
 781        case GETPID:
 782                err = curr->sempid;
 783                goto out_unlock;
 784        case GETNCNT:
 785                err = count_semncnt(sma,semnum);
 786                goto out_unlock;
 787        case GETZCNT:
 788                err = count_semzcnt(sma,semnum);
 789                goto out_unlock;
 790        case SETVAL:
 791        {
 792                int val = arg.val;
 793                struct sem_undo *un;
 794
 795                err = -ERANGE;
 796                if (val > SEMVMX || val < 0)
 797                        goto out_unlock;
 798
 799                assert_spin_locked(&sma->sem_perm.lock);
 800                list_for_each_entry(un, &sma->list_id, list_id)
 801                        un->semadj[semnum] = 0;
 802
 803                curr->semval = val;
 804                curr->sempid = task_tgid_vnr(current);
 805                sma->sem_ctime = get_seconds();
 806                /* maybe some queued-up processes were waiting for this */
 807                update_queue(sma);
 808                err = 0;
 809                goto out_unlock;
 810        }
 811        }
 812out_unlock:
 813        sem_unlock(sma);
 814out_free:
 815        if(sem_io != fast_sem_io)
 816                ipc_free(sem_io, sizeof(ushort)*nsems);
 817        return err;
 818}
 819
 820static inline unsigned long
 821copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
 822{
 823        switch(version) {
 824        case IPC_64:
 825                if (copy_from_user(out, buf, sizeof(*out)))
 826                        return -EFAULT;
 827                return 0;
 828        case IPC_OLD:
 829            {
 830                struct semid_ds tbuf_old;
 831
 832                if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 833                        return -EFAULT;
 834
 835                out->sem_perm.uid       = tbuf_old.sem_perm.uid;
 836                out->sem_perm.gid       = tbuf_old.sem_perm.gid;
 837                out->sem_perm.mode      = tbuf_old.sem_perm.mode;
 838
 839                return 0;
 840            }
 841        default:
 842                return -EINVAL;
 843        }
 844}
 845
 846/*
 847 * This function handles some semctl commands which require the rw_mutex
 848 * to be held in write mode.
 849 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 850 */
 851static int semctl_down(struct ipc_namespace *ns, int semid,
 852                       int cmd, int version, union semun arg)
 853{
 854        struct sem_array *sma;
 855        int err;
 856        struct semid64_ds semid64;
 857        struct kern_ipc_perm *ipcp;
 858
 859        if(cmd == IPC_SET) {
 860                if (copy_semid_from_user(&semid64, arg.buf, version))
 861                        return -EFAULT;
 862        }
 863
 864        ipcp = ipcctl_pre_down(&sem_ids(ns), semid, cmd, &semid64.sem_perm, 0);
 865        if (IS_ERR(ipcp))
 866                return PTR_ERR(ipcp);
 867
 868        sma = container_of(ipcp, struct sem_array, sem_perm);
 869
 870        err = security_sem_semctl(sma, cmd);
 871        if (err)
 872                goto out_unlock;
 873
 874        switch(cmd){
 875        case IPC_RMID:
 876                freeary(ns, ipcp);
 877                goto out_up;
 878        case IPC_SET:
 879                ipc_update_perm(&semid64.sem_perm, ipcp);
 880                sma->sem_ctime = get_seconds();
 881                break;
 882        default:
 883                err = -EINVAL;
 884        }
 885
 886out_unlock:
 887        sem_unlock(sma);
 888out_up:
 889        up_write(&sem_ids(ns).rw_mutex);
 890        return err;
 891}
 892
 893SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
 894{
 895        int err = -EINVAL;
 896        int version;
 897        struct ipc_namespace *ns;
 898
 899        if (semid < 0)
 900                return -EINVAL;
 901
 902        version = ipc_parse_version(&cmd);
 903        ns = current->nsproxy->ipc_ns;
 904
 905        switch(cmd) {
 906        case IPC_INFO:
 907        case SEM_INFO:
 908        case IPC_STAT:
 909        case SEM_STAT:
 910                err = semctl_nolock(ns, semid, cmd, version, arg);
 911                return err;
 912        case GETALL:
 913        case GETVAL:
 914        case GETPID:
 915        case GETNCNT:
 916        case GETZCNT:
 917        case SETVAL:
 918        case SETALL:
 919                err = semctl_main(ns,semid,semnum,cmd,version,arg);
 920                return err;
 921        case IPC_RMID:
 922        case IPC_SET:
 923                err = semctl_down(ns, semid, cmd, version, arg);
 924                return err;
 925        default:
 926                return -EINVAL;
 927        }
 928}
 929#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
 930asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
 931{
 932        return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
 933}
 934SYSCALL_ALIAS(sys_semctl, SyS_semctl);
 935#endif
 936
  937/* If the task doesn't already have an undo_list, then allocate one
 938 * here.  We guarantee there is only one thread using this undo list,
 939 * and current is THE ONE
 940 *
 941 * If this allocation and assignment succeeds, but later
 942 * portions of this code fail, there is no need to free the sem_undo_list.
 943 * Just let it stay associated with the task, and it'll be freed later
 944 * at exit time.
 945 *
 946 * This can block, so callers must hold no locks.
 947 */
 948static inline int get_undo_list(struct sem_undo_list **undo_listp)
 949{
 950        struct sem_undo_list *undo_list;
 951
 952        undo_list = current->sysvsem.undo_list;
 953        if (!undo_list) {
 954                undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
 955                if (undo_list == NULL)
 956                        return -ENOMEM;
 957                spin_lock_init(&undo_list->lock);
 958                atomic_set(&undo_list->refcnt, 1);
 959                INIT_LIST_HEAD(&undo_list->list_proc);
 960
 961                current->sysvsem.undo_list = undo_list;
 962        }
 963        *undo_listp = undo_list;
 964        return 0;
 965}
 966
 967static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
 968{
 969        struct sem_undo *walk;
 970
 971        list_for_each_entry_rcu(walk, &ulp->list_proc, list_proc) {
 972                if (walk->semid == semid)
 973                        return walk;
 974        }
 975        return NULL;
 976}
 977
 978/**
 979 * find_alloc_undo - Lookup (and if not present create) undo array
 980 * @ns: namespace
 981 * @semid: semaphore array id
 982 *
 983 * The function looks up (and if not present creates) the undo structure.
 984 * The size of the undo structure depends on the size of the semaphore
 985 * array, thus the alloc path is not that straightforward.
  986 * Lifetime rules: sem_undo is rcu-protected; on success, the function
  987 * returns with rcu_read_lock() held.
 988 */
 989static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 990{
 991        struct sem_array *sma;
 992        struct sem_undo_list *ulp;
 993        struct sem_undo *un, *new;
 994        int nsems;
 995        int error;
 996
 997        error = get_undo_list(&ulp);
 998        if (error)
 999                return ERR_PTR(error);
1000
1001        rcu_read_lock();
1002        spin_lock(&ulp->lock);
1003        un = lookup_undo(ulp, semid);
1004        spin_unlock(&ulp->lock);
1005        if (likely(un!=NULL))
1006                goto out;
1007        rcu_read_unlock();
1008
1009        /* no undo structure around - allocate one. */
1010        /* step 1: figure out the size of the semaphore array */
1011        sma = sem_lock_check(ns, semid);
1012        if (IS_ERR(sma))
1013                return ERR_PTR(PTR_ERR(sma));
1014
1015        nsems = sma->sem_nsems;
1016        sem_getref_and_unlock(sma);
1017
1018        /* step 2: allocate new undo structure */
1019        new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1020        if (!new) {
1021                sem_putref(sma);
1022                return ERR_PTR(-ENOMEM);
1023        }
1024
1025        /* step 3: Acquire the lock on semaphore array */
1026        sem_lock_and_putref(sma);
1027        if (sma->sem_perm.deleted) {
1028                sem_unlock(sma);
1029                kfree(new);
1030                un = ERR_PTR(-EIDRM);
1031                goto out;
1032        }
1033        spin_lock(&ulp->lock);
1034
1035        /*
1036         * step 4: check for races: did someone else allocate the undo struct?
1037         */
1038        un = lookup_undo(ulp, semid);
1039        if (un) {
1040                kfree(new);
1041                goto success;
1042        }
1043        /* step 5: initialize & link new undo structure */
1044        new->semadj = (short *) &new[1];
1045        new->ulp = ulp;
1046        new->semid = semid;
1047        assert_spin_locked(&ulp->lock);
1048        list_add_rcu(&new->list_proc, &ulp->list_proc);
1049        assert_spin_locked(&sma->sem_perm.lock);
1050        list_add(&new->list_id, &sma->list_id);
1051        un = new;
1052
1053success:
1054        spin_unlock(&ulp->lock);
1055        rcu_read_lock();
1056        sem_unlock(sma);
1057out:
1058        return un;
1059}
1060
1061SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1062                unsigned, nsops, const struct timespec __user *, timeout)
1063{
1064        int error = -EINVAL;
1065        struct sem_array *sma;
1066        struct sembuf fast_sops[SEMOPM_FAST];
1067        struct sembuf* sops = fast_sops, *sop;
1068        struct sem_undo *un;
1069        int undos = 0, alter = 0, max;
1070        struct sem_queue queue;
1071        unsigned long jiffies_left = 0;
1072        struct ipc_namespace *ns;
1073
1074        ns = current->nsproxy->ipc_ns;
1075
1076        if (nsops < 1 || semid < 0)
1077                return -EINVAL;
1078        if (nsops > ns->sc_semopm)
1079                return -E2BIG;
1080        if(nsops > SEMOPM_FAST) {
1081                sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
1082                if(sops==NULL)
1083                        return -ENOMEM;
1084        }
1085        if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
1086                error=-EFAULT;
1087                goto out_free;
1088        }
1089        if (timeout) {
1090                struct timespec _timeout;
1091                if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
1092                        error = -EFAULT;
1093                        goto out_free;
1094                }
1095                if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
1096                        _timeout.tv_nsec >= 1000000000L) {
1097                        error = -EINVAL;
1098                        goto out_free;
1099                }
1100                jiffies_left = timespec_to_jiffies(&_timeout);
1101        }
1102        max = 0;
1103        for (sop = sops; sop < sops + nsops; sop++) {
1104                if (sop->sem_num >= max)
1105                        max = sop->sem_num;
1106                if (sop->sem_flg & SEM_UNDO)
1107                        undos = 1;
1108                if (sop->sem_op != 0)
1109                        alter = 1;
1110        }
1111
1112        if (undos) {
1113                un = find_alloc_undo(ns, semid);
1114                if (IS_ERR(un)) {
1115                        error = PTR_ERR(un);
1116                        goto out_free;
1117                }
1118        } else
1119                un = NULL;
1120
1121        sma = sem_lock_check(ns, semid);
1122        if (IS_ERR(sma)) {
1123                if (un)
1124                        rcu_read_unlock();
1125                error = PTR_ERR(sma);
1126                goto out_free;
1127        }
1128
1129        /*
1130         * semid identifiers are not unique - find_alloc_undo may have
 1131         * allocated an undo structure which was then invalidated by an RMID
 1132         * and now a new array has received the same id. Check and fail.
 1133         * This case can be detected by checking un->semid. The existence
 1134         * of "un" itself is guaranteed by rcu.
1135         */
1136        error = -EIDRM;
1137        if (un) {
1138                if (un->semid == -1) {
1139                        rcu_read_unlock();
1140                        goto out_unlock_free;
1141                } else {
1142                        /*
1143                         * rcu lock can be released, "un" cannot disappear:
1144                         * - sem_lock is acquired, thus IPC_RMID is
1145                         *   impossible.
1146                         * - exit_sem is impossible, it always operates on
1147                         *   current (or a dead task).
1148                         */
1149
1150                        rcu_read_unlock();
1151                }
1152        }
1153
1154        error = -EFBIG;
1155        if (max >= sma->sem_nsems)
1156                goto out_unlock_free;
1157
1158        error = -EACCES;
1159        if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
1160                goto out_unlock_free;
1161
1162        error = security_sem_semop(sma, sops, nsops, alter);
1163        if (error)
1164                goto out_unlock_free;
1165
1166        error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
1167        if (error <= 0) {
1168                if (alter && error == 0)
1169                        update_queue (sma);
1170                goto out_unlock_free;
1171        }
1172
1173        /* We need to sleep on this operation, so we put the current
1174         * task into the pending queue and go to sleep.
1175         */
1176                
1177        queue.sops = sops;
1178        queue.nsops = nsops;
1179        queue.undo = un;
1180        queue.pid = task_tgid_vnr(current);
1181        queue.alter = alter;
1182        if (alter)
1183                list_add_tail(&queue.list, &sma->sem_pending);
1184        else
1185                list_add(&queue.list, &sma->sem_pending);
1186
1187        queue.status = -EINTR;
1188        queue.sleeper = current;
1189        current->state = TASK_INTERRUPTIBLE;
1190        sem_unlock(sma);
1191
1192        if (timeout)
1193                jiffies_left = schedule_timeout(jiffies_left);
1194        else
1195                schedule();
1196
1197        error = queue.status;
1198        while(unlikely(error == IN_WAKEUP)) {
1199                cpu_relax();
1200                error = queue.status;
1201        }
1202
1203        if (error != -EINTR) {
1204                /* fast path: update_queue already obtained all requested
1205                 * resources */
1206                goto out_free;
1207        }
1208
1209        sma = sem_lock(ns, semid);
1210        if (IS_ERR(sma)) {
1211                error = -EIDRM;
1212                goto out_free;
1213        }
1214
1215        /*
 1216         * If queue.status != -EINTR we were woken up by another process.
1217         */
1218        error = queue.status;
1219        if (error != -EINTR) {
1220                goto out_unlock_free;
1221        }
1222
1223        /*
 1224         * If an interrupt or timeout occurred we have to clean up the queue
1225         */
1226        if (timeout && jiffies_left == 0)
1227                error = -EAGAIN;
1228        list_del(&queue.list);
1229        goto out_unlock_free;
1230
1231out_unlock_free:
1232        sem_unlock(sma);
1233out_free:
1234        if(sops != fast_sops)
1235                kfree(sops);
1236        return error;
1237}
1238
1239SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
1240                unsigned, nsops)
1241{
1242        return sys_semtimedop(semid, tsops, nsops, NULL);
1243}
1244
1245/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
1246 * parent and child tasks.
1247 */
1248
1249int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
1250{
1251        struct sem_undo_list *undo_list;
1252        int error;
1253
1254        if (clone_flags & CLONE_SYSVSEM) {
1255                error = get_undo_list(&undo_list);
1256                if (error)
1257                        return error;
1258                atomic_inc(&undo_list->refcnt);
1259                tsk->sysvsem.undo_list = undo_list;
1260        } else 
1261                tsk->sysvsem.undo_list = NULL;
1262
1263        return 0;
1264}
1265
1266/*
1267 * add semadj values to semaphores, free undo structures.
1268 * undo structures are not freed when semaphore arrays are destroyed
1269 * so some of them may be out of date.
1270 * IMPLEMENTATION NOTE: There is some confusion over whether the
1271 * set of adjustments that needs to be done should be done in an atomic
1272 * manner or not. That is, if we are attempting to decrement the semval
1273 * should we queue up and wait until we can do so legally?
1274 * The original implementation attempted to do this (queue and wait).
1275 * The current implementation does not do so. The POSIX standard
1276 * and SVID should be consulted to determine what behavior is mandated.
1277 */
1278void exit_sem(struct task_struct *tsk)
1279{
1280        struct sem_undo_list *ulp;
1281
1282        ulp = tsk->sysvsem.undo_list;
1283        if (!ulp)
1284                return;
1285        tsk->sysvsem.undo_list = NULL;
1286
1287        if (!atomic_dec_and_test(&ulp->refcnt))
1288                return;
1289
1290        for (;;) {
1291                struct sem_array *sma;
1292                struct sem_undo *un;
1293                int semid;
1294                int i;
1295
1296                rcu_read_lock();
1297                un = list_entry(rcu_dereference(ulp->list_proc.next),
1298                                        struct sem_undo, list_proc);
1299                if (&un->list_proc == &ulp->list_proc)
1300                        semid = -1;
1301                 else
1302                        semid = un->semid;
1303                rcu_read_unlock();
1304
1305                if (semid == -1)
1306                        break;
1307
1308                sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
1309
1310                /* exit_sem raced with IPC_RMID, nothing to do */
1311                if (IS_ERR(sma))
1312                        continue;
1313
1314                un = lookup_undo(ulp, semid);
1315                if (un == NULL) {
1316                        /* exit_sem raced with IPC_RMID+semget() that created
1317                         * exactly the same semid. Nothing to do.
1318                         */
1319                        sem_unlock(sma);
1320                        continue;
1321                }
1322
1323                /* remove un from the linked lists */
1324                assert_spin_locked(&sma->sem_perm.lock);
1325                list_del(&un->list_id);
1326
1327                spin_lock(&ulp->lock);
1328                list_del_rcu(&un->list_proc);
1329                spin_unlock(&ulp->lock);
1330
1331                /* perform adjustments registered in un */
1332                for (i = 0; i < sma->sem_nsems; i++) {
1333                        struct sem * semaphore = &sma->sem_base[i];
1334                        if (un->semadj[i]) {
1335                                semaphore->semval += un->semadj[i];
1336                                /*
1337                                 * Range checks of the new semaphore value,
 1338                                 * not defined by SUS:
1339                                 * - Some unices ignore the undo entirely
1340                                 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
1341                                 * - some cap the value (e.g. FreeBSD caps
1342                                 *   at 0, but doesn't enforce SEMVMX)
1343                                 *
1344                                 * Linux caps the semaphore value, both at 0
1345                                 * and at SEMVMX.
1346                                 *
1347                                 *      Manfred <manfred@colorfullife.com>
1348                                 */
1349                                if (semaphore->semval < 0)
1350                                        semaphore->semval = 0;
1351                                if (semaphore->semval > SEMVMX)
1352                                        semaphore->semval = SEMVMX;
1353                                semaphore->sempid = task_tgid_vnr(current);
1354                        }
1355                }
1356                sma->sem_otime = get_seconds();
1357                /* maybe some queued-up processes were waiting for this */
1358                update_queue(sma);
1359                sem_unlock(sma);
1360
1361                call_rcu(&un->rcu, free_un);
1362        }
1363        kfree(ulp);
1364}
1365
1366#ifdef CONFIG_PROC_FS
1367static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
1368{
1369        struct sem_array *sma = it;
1370
1371        return seq_printf(s,
1372                          "%10d %10d  %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
1373                          sma->sem_perm.key,
1374                          sma->sem_perm.id,
1375                          sma->sem_perm.mode,
1376                          sma->sem_nsems,
1377                          sma->sem_perm.uid,
1378                          sma->sem_perm.gid,
1379                          sma->sem_perm.cuid,
1380                          sma->sem_perm.cgid,
1381                          sma->sem_otime,
1382                          sma->sem_ctime);
1383}
1384#endif
1385