   1/*
   2 * linux/ipc/sem.c
   3 * Copyright (C) 1992 Krishna Balasubramanian
   4 * Copyright (C) 1995 Eric Schenk, Bruno Haible
   5 *
   6 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
   7 * This code underwent a massive rewrite in order to solve some problems
   8 * with the original code. In particular the original code failed to
   9 * wake up processes that were waiting for semval to go to 0 if the
  10 * value went to 0 and was then incremented rapidly enough. In solving
  11 * this problem I have also modified the implementation so that it
  12 * processes pending operations in a FIFO manner, thus giving a guarantee
  13 * that processes waiting for a lock on the semaphore won't starve
  14 * unless another locking process fails to unlock.
  15 * In addition the following two changes in behavior have been introduced:
  16 * - The original implementation of semop returned the value
  17 *   of the last semaphore element examined on success. This does not
  18 *   match the manual page specifications, and effectively
  19 *   allows the user to read the semaphore even if they do not
  20 *   have read permissions. The implementation now returns 0
  21 *   on success as stated in the manual page.
  22 * - There is some confusion over whether the set of undo adjustments
  23 *   to be performed at exit should be done in an atomic manner.
  24 *   That is, if we are attempting to decrement the semval should we queue
  25 *   up and wait until we can do so legally?
  26 *   The original implementation attempted to do this.
  27 *   The current implementation does not do so. This is because I don't
  28 *   think it is the right thing (TM) to do, and because I couldn't
  29 *   see a clean way to get the old behavior with the new design.
  30 *   The POSIX standard and SVID should be consulted to determine
  31 *   what behavior is mandated.
  32 *
  33 * Further notes on refinement (Christoph Rohland, December 1998):
  34 * - The POSIX standard says that the undo adjustments should simply be
  35 *   redone. So the current implementation is OK.
  36 * - The previous code had two flaws:
  37 *   1) It actively gave the semaphore to the next waiting process
  38 *      sleeping on the semaphore. Since this process did not have the
  39 *      CPU, this led to many unnecessary context switches and bad
  40 *      performance. Now we only check which process should be able to
  41 *      get the semaphore, and if this process wants to reduce some
  42 *      semaphore value, we simply wake it up without doing the
  43 *      operation, so it has to try to get it later. Thus e.g. the
  44 *      running process may reacquire the semaphore during the current
  45 *      time slice. If it only waits for zero or increases the semaphore,
  46 *      we do the operation in advance and wake it up.
  47 *   2) It did not wake up all processes waiting for zero. We try to do
  48 *      better, but only get right those semops which only wait for zero
  49 *      or increase. If there are decrement operations in the operations
  50 *      array, we do the same as before.
  51 *
  52 * With the arrival of the O(1) scheduler, it became unnecessary to perform
  53 * the check/retry algorithm for waking up blocked processes, as the new
  54 * scheduler handles thread switches better than the old one.
  55 *
  56 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
  57 *
  58 * SMP-threaded, sysctl's added
  59 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
  60 * Enforced range limit on SEM_UNDO
  61 * (c) 2001 Red Hat Inc <alan@redhat.com>
  62 * Lockless wakeup
  63 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
  64 *
  65 * support for audit of ipc object properties and permission changes
  66 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
  67 *
  68 * namespaces support
  69 * OpenVZ, SWsoft Inc.
  70 * Pavel Emelianov <xemul@openvz.org>
  71 */
  72
  73#include <linux/slab.h>
  74#include <linux/spinlock.h>
  75#include <linux/init.h>
  76#include <linux/proc_fs.h>
  77#include <linux/time.h>
  78#include <linux/security.h>
  79#include <linux/syscalls.h>
  80#include <linux/audit.h>
  81#include <linux/capability.h>
  82#include <linux/seq_file.h>
  83#include <linux/rwsem.h>
  84#include <linux/nsproxy.h>
  85#include <linux/ipc_namespace.h>
  86
  87#include <asm/uaccess.h>
  88#include "util.h"
  89
  90#define sem_ids(ns)     ((ns)->ids[IPC_SEM_IDS])
  91
  92#define sem_unlock(sma)         ipc_unlock(&(sma)->sem_perm)
  93#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
  94
  95static int newary(struct ipc_namespace *, struct ipc_params *);
  96static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
  97#ifdef CONFIG_PROC_FS
  98static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
  99#endif
 100
 101#define SEMMSL_FAST     256 /* 512 bytes on stack */
 102#define SEMOPM_FAST     64  /* ~ 372 bytes on stack */
 103
 104/*
 105 * linked list protection:
 106 *      sem_undo.id_next,
 107 *      sem_array.sem_pending{,last},
 108 *      sem_array.sem_undo: sem_lock() for read/write
 109 *      sem_undo.proc_next: only "current" is allowed to read/write that field.
 110 *      
 111 */
 112
 113#define sc_semmsl       sem_ctls[0]
 114#define sc_semmns       sem_ctls[1]
 115#define sc_semopm       sem_ctls[2]
 116#define sc_semmni       sem_ctls[3]
 117
 118void sem_init_ns(struct ipc_namespace *ns)
 119{
 120        ns->sc_semmsl = SEMMSL;
 121        ns->sc_semmns = SEMMNS;
 122        ns->sc_semopm = SEMOPM;
 123        ns->sc_semmni = SEMMNI;
 124        ns->used_sems = 0;
 125        ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
 126}
 127
 128#ifdef CONFIG_IPC_NS
 129void sem_exit_ns(struct ipc_namespace *ns)
 130{
 131        free_ipcs(ns, &sem_ids(ns), freeary);
 132}
 133#endif
 134
 135void __init sem_init (void)
 136{
 137        sem_init_ns(&init_ipc_ns);
 138        ipc_init_proc_interface("sysvipc/sem",
 139                                "       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
 140                                IPC_SEM_IDS, sysvipc_sem_proc_show);
 141}
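
/*
 * Illustrative note (not from the original source): each semaphore set then
 * shows up as one line in /proc/sysvipc/sem, formatted by
 * sysvipc_sem_proc_show() at the bottom of this file, roughly like:
 *
 *        key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime
 *          0      32768   600          3  1000  1000  1000  1000          0 1234567890
 *
 * (the values above are made up for illustration).
 */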
 142
 143/*
 144 * sem_lock_(check_) routines are called in the paths where the rw_mutex
 145 * is not held.
 146 */
 147static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
 148{
 149        struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);
 150
 151        if (IS_ERR(ipcp))
 152                return (struct sem_array *)ipcp;
 153
 154        return container_of(ipcp, struct sem_array, sem_perm);
 155}
 156
 157static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
 158                                                int id)
 159{
 160        struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
 161
 162        if (IS_ERR(ipcp))
 163                return (struct sem_array *)ipcp;
 164
 165        return container_of(ipcp, struct sem_array, sem_perm);
 166}
 167
 168static inline void sem_lock_and_putref(struct sem_array *sma)
 169{
 170        ipc_lock_by_ptr(&sma->sem_perm);
 171        ipc_rcu_putref(sma);
 172}
 173
 174static inline void sem_getref_and_unlock(struct sem_array *sma)
 175{
 176        ipc_rcu_getref(sma);
 177        ipc_unlock(&(sma)->sem_perm);
 178}
 179
 180static inline void sem_putref(struct sem_array *sma)
 181{
 182        ipc_lock_by_ptr(&sma->sem_perm);
 183        ipc_rcu_putref(sma);
 184        ipc_unlock(&(sma)->sem_perm);
 185}
 186
 187static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 188{
 189        ipc_rmid(&sem_ids(ns), &s->sem_perm);
 190}
 191
 192/*
 193 * Lockless wakeup algorithm:
 194 * Without the check/retry algorithm a lockless wakeup is possible:
 195 * - queue.status is initialized to -EINTR before blocking.
 196 * - wakeup is performed by
 197 *      * unlinking the queue entry from sma->sem_pending
 198 *      * setting queue.status to IN_WAKEUP
 199 *        This is the notification for the blocked thread that a
 200 *        result value is imminent.
 201 *      * call wake_up_process
 202 *      * set queue.status to the final value.
 203 * - the previously blocked thread checks queue.status:
 204 *      * if it's IN_WAKEUP, then it must wait until the value changes
 205 *      * if it's not -EINTR, then the operation was completed by
 206 *        update_queue. semtimedop can return queue.status without
 207 *        performing any operation on the sem array.
 208 *      * otherwise it must acquire the spinlock and check what's up.
 209 *
 210 * The two-stage algorithm is necessary to protect against the following
 211 * races:
 212 * - if queue.status is set after wake_up_process, then the woken up
 213 *   thread could race forward and try (and fail) to acquire sma->lock
 214 *   before update_queue had a chance to set queue.status
 215 * - if queue.status is written before wake_up_process and if the
 216 *   blocked process is woken up by a signal between writing
 217 *   queue.status and the wake_up_process, then the woken up
 218 *   process could return from semtimedop and die by calling
 219 *   sys_exit before wake_up_process is called. Then wake_up_process
 220 *   will oops, because the task structure is already invalid.
 221 *   (yes, this happened on s390 with sysv msg).
 222 *
 223 */
 224#define IN_WAKEUP       1
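
/*
 * A condensed sketch of the waiter's side of the handshake described above.
 * It mirrors the code in semtimedop() further down and is shown here only to
 * make the two-stage protocol concrete:
 *
 *      error = queue.status;
 *      while (unlikely(error == IN_WAKEUP)) {
 *              cpu_relax();            // waker is between IN_WAKEUP and the final value
 *              error = queue.status;
 *      }
 *      if (error != -EINTR)
 *              return error;           // completed by update_queue(), no lock needed
 *      // otherwise: acquire sma->lock and check what happened
 */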
 225
 226/**
 227 * newary - Create a new semaphore set
 228 * @ns: namespace
 229 * @params: ptr to the structure that contains key, semflg and nsems
 230 *
 231 * Called with sem_ids.rw_mutex held (as a writer)
 232 */
 233
 234static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 235{
 236        int id;
 237        int retval;
 238        struct sem_array *sma;
 239        int size;
 240        key_t key = params->key;
 241        int nsems = params->u.nsems;
 242        int semflg = params->flg;
 243
 244        if (!nsems)
 245                return -EINVAL;
 246        if (ns->used_sems + nsems > ns->sc_semmns)
 247                return -ENOSPC;
 248
 249        size = sizeof (*sma) + nsems * sizeof (struct sem);
 250        sma = ipc_rcu_alloc(size);
 251        if (!sma) {
 252                return -ENOMEM;
 253        }
 254        memset (sma, 0, size);
 255
 256        sma->sem_perm.mode = (semflg & S_IRWXUGO);
 257        sma->sem_perm.key = key;
 258
 259        sma->sem_perm.security = NULL;
 260        retval = security_sem_alloc(sma);
 261        if (retval) {
 262                ipc_rcu_putref(sma);
 263                return retval;
 264        }
 265
 266        id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
 267        if (id < 0) {
 268                security_sem_free(sma);
 269                ipc_rcu_putref(sma);
 270                return id;
 271        }
 272        ns->used_sems += nsems;
 273
 274        sma->sem_base = (struct sem *) &sma[1];
 275        INIT_LIST_HEAD(&sma->sem_pending);
 276        INIT_LIST_HEAD(&sma->list_id);
 277        sma->sem_nsems = nsems;
 278        sma->sem_ctime = get_seconds();
 279        sem_unlock(sma);
 280
 281        return sma->sem_perm.id;
 282}
 283
 284
 285/*
 286 * Called with sem_ids.rw_mutex and ipcp locked.
 287 */
 288static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
 289{
 290        struct sem_array *sma;
 291
 292        sma = container_of(ipcp, struct sem_array, sem_perm);
 293        return security_sem_associate(sma, semflg);
 294}
 295
 296/*
 297 * Called with sem_ids.rw_mutex and ipcp locked.
 298 */
 299static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
 300                                struct ipc_params *params)
 301{
 302        struct sem_array *sma;
 303
 304        sma = container_of(ipcp, struct sem_array, sem_perm);
 305        if (params->u.nsems > sma->sem_nsems)
 306                return -EINVAL;
 307
 308        return 0;
 309}
 310
 311SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 312{
 313        struct ipc_namespace *ns;
 314        struct ipc_ops sem_ops;
 315        struct ipc_params sem_params;
 316
 317        ns = current->nsproxy->ipc_ns;
 318
 319        if (nsems < 0 || nsems > ns->sc_semmsl)
 320                return -EINVAL;
 321
 322        sem_ops.getnew = newary;
 323        sem_ops.associate = sem_security;
 324        sem_ops.more_checks = sem_more_checks;
 325
 326        sem_params.key = key;
 327        sem_params.flg = semflg;
 328        sem_params.u.nsems = nsems;
 329
 330        return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 331}
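
/*
 * Minimal user-space sketch of a semget() call that ends up here
 * (illustrative only; the path and permissions are made up):
 *
 *      #include <sys/ipc.h>
 *      #include <sys/sem.h>
 *
 *      key_t key = ftok("/some/path", 'S');
 *      int semid = semget(key, 3, IPC_CREAT | 0600);   // create/look up 3 semaphores
 *
 * Passing nsems == 0 is fine when looking up an existing set, but creating a
 * new one requires nsems > 0 (see the !nsems check in newary() above).
 */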
 332
 333/*
 334 * Determine whether a sequence of semaphore operations would succeed
 335 * all at once. Return 0 if yes, 1 if the caller needs to sleep, else an error code.
 336 */
 337
 338static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
 339                             int nsops, struct sem_undo *un, int pid)
 340{
 341        int result, sem_op;
 342        struct sembuf *sop;
 343        struct sem * curr;
 344
 345        for (sop = sops; sop < sops + nsops; sop++) {
 346                curr = sma->sem_base + sop->sem_num;
 347                sem_op = sop->sem_op;
 348                result = curr->semval;
 349  
 350                if (!sem_op && result)
 351                        goto would_block;
 352
 353                result += sem_op;
 354                if (result < 0)
 355                        goto would_block;
 356                if (result > SEMVMX)
 357                        goto out_of_range;
 358                if (sop->sem_flg & SEM_UNDO) {
 359                        int undo = un->semadj[sop->sem_num] - sem_op;
 360                        /*
 361                         *      Exceeding the undo range is an error.
 362                         */
 363                        if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 364                                goto out_of_range;
 365                }
 366                curr->semval = result;
 367        }
 368
 369        sop--;
 370        while (sop >= sops) {
 371                sma->sem_base[sop->sem_num].sempid = pid;
 372                if (sop->sem_flg & SEM_UNDO)
 373                        un->semadj[sop->sem_num] -= sop->sem_op;
 374                sop--;
 375        }
 376        
 377        sma->sem_otime = get_seconds();
 378        return 0;
 379
 380out_of_range:
 381        result = -ERANGE;
 382        goto undo;
 383
 384would_block:
 385        if (sop->sem_flg & IPC_NOWAIT)
 386                result = -EAGAIN;
 387        else
 388                result = 1;
 389
 390undo:
 391        sop--;
 392        while (sop >= sops) {
 393                sma->sem_base[sop->sem_num].semval -= sop->sem_op;
 394                sop--;
 395        }
 396
 397        return result;
 398}
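
/*
 * Worked example of the return convention (illustrative values): with
 * semval[0] == 0, the single operation {sem_num=0, sem_op=-1, sem_flg=0}
 * would drive the value negative, so nothing is applied and 1 is returned
 * ("need to sleep"); the same operation with IPC_NOWAIT set returns -EAGAIN
 * instead; and {sem_num=0, sem_op=+1} is applied immediately and returns 0.
 */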
 399
 400/* Go through the pending queue for the indicated semaphore set
 401 * looking for tasks that can be completed.
 402 */
 403static void update_queue (struct sem_array * sma)
 404{
 405        int error;
 406        struct sem_queue * q;
 407
 408        q = list_entry(sma->sem_pending.next, struct sem_queue, list);
 409        while (&q->list != &sma->sem_pending) {
 410                error = try_atomic_semop(sma, q->sops, q->nsops,
 411                                         q->undo, q->pid);
 412
 413                /* Does q->sleeper still need to sleep? */
 414                if (error <= 0) {
 415                        struct sem_queue *n;
 416
 417                        /*
 418                         * Continue scanning. The next operation
 419                         * that must be checked depends on the type of the
 420                         * completed operation:
 421                         * - if the operation modified the array, then
 422                         *   restart from the head of the queue and
 423                         *   check for threads that might be waiting
 424                         *   for semaphore values to become 0.
 425                         * - if the operation didn't modify the array,
 426                         *   then just continue.
 427                         * The order of list_del() and reading ->next
 428                         * is crucial: In the former case, the list_del()
 429                         * must be done first [because we might be the
 430                         * first entry in ->sem_pending], in the latter
 431                         * case the list_del() must be done last
 432                         * [because the list is invalid after the list_del()]
 433                         */
 434                        if (q->alter) {
 435                                list_del(&q->list);
 436                                n = list_entry(sma->sem_pending.next,
 437                                                struct sem_queue, list);
 438                        } else {
 439                                n = list_entry(q->list.next, struct sem_queue,
 440                                                list);
 441                                list_del(&q->list);
 442                        }
 443
 444                        /* wake up the waiting thread */
 445                        q->status = IN_WAKEUP;
 446
 447                        wake_up_process(q->sleeper);
 448                        /* hands-off: q will disappear immediately after
 449                         * writing q->status.
 450                         */
 451                        smp_wmb();
 452                        q->status = error;
 453                        q = n;
 454                } else {
 455                        q = list_entry(q->list.next, struct sem_queue, list);
 456                }
 457        }
 458}
 459
 460/* The following counts are associated with each semaphore:
 461 *   semncnt        number of tasks waiting on semval being nonzero
 462 *   semzcnt        number of tasks waiting on semval being zero
 463 * This model assumes that a task waits on exactly one semaphore.
 464 * Since semaphore operations are to be performed atomically, tasks actually
 465 * wait on a whole sequence of semaphores simultaneously.
 466 * The counts we return here are a rough approximation, but still
 467 * guarantee that semncnt+semzcnt>0 if the task is on the pending queue.
 468 */
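/*
 * For example (illustrative), a task blocked on the two-element sequence
 * { {sem_num=0, sem_op=-1}, {sem_num=1, sem_op=0} } is counted both in
 * semncnt of semaphore 0 and in semzcnt of semaphore 1, although it really
 * sleeps on the whole sequence at once - hence "rough approximation".
 */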
 469static int count_semncnt (struct sem_array * sma, ushort semnum)
 470{
 471        int semncnt;
 472        struct sem_queue * q;
 473
 474        semncnt = 0;
 475        list_for_each_entry(q, &sma->sem_pending, list) {
 476                struct sembuf * sops = q->sops;
 477                int nsops = q->nsops;
 478                int i;
 479                for (i = 0; i < nsops; i++)
 480                        if (sops[i].sem_num == semnum
 481                            && (sops[i].sem_op < 0)
 482                            && !(sops[i].sem_flg & IPC_NOWAIT))
 483                                semncnt++;
 484        }
 485        return semncnt;
 486}
 487
 488static int count_semzcnt (struct sem_array * sma, ushort semnum)
 489{
 490        int semzcnt;
 491        struct sem_queue * q;
 492
 493        semzcnt = 0;
 494        list_for_each_entry(q, &sma->sem_pending, list) {
 495                struct sembuf * sops = q->sops;
 496                int nsops = q->nsops;
 497                int i;
 498                for (i = 0; i < nsops; i++)
 499                        if (sops[i].sem_num == semnum
 500                            && (sops[i].sem_op == 0)
 501                            && !(sops[i].sem_flg & IPC_NOWAIT))
 502                                semzcnt++;
 503        }
 504        return semzcnt;
 505}
 506
 507static void free_un(struct rcu_head *head)
 508{
 509        struct sem_undo *un = container_of(head, struct sem_undo, rcu);
 510        kfree(un);
 511}
 512
 513/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
 514 * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex
 515 * remains locked on exit.
 516 */
 517static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 518{
 519        struct sem_undo *un, *tu;
 520        struct sem_queue *q, *tq;
 521        struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
 522
 523        /* Free the existing undo structures for this semaphore set.  */
 524        assert_spin_locked(&sma->sem_perm.lock);
 525        list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
 526                list_del(&un->list_id);
 527                spin_lock(&un->ulp->lock);
 528                un->semid = -1;
 529                list_del_rcu(&un->list_proc);
 530                spin_unlock(&un->ulp->lock);
 531                call_rcu(&un->rcu, free_un);
 532        }
 533
 534        /* Wake up all pending processes and let them fail with EIDRM. */
 535        list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
 536                list_del(&q->list);
 537
 538                q->status = IN_WAKEUP;
 539                wake_up_process(q->sleeper); /* doesn't sleep */
 540                smp_wmb();
 541                q->status = -EIDRM;     /* hands-off q */
 542        }
 543
 544        /* Remove the semaphore set from the IDR */
 545        sem_rmid(ns, sma);
 546        sem_unlock(sma);
 547
 548        ns->used_sems -= sma->sem_nsems;
 549        security_sem_free(sma);
 550        ipc_rcu_putref(sma);
 551}
 552
 553static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
 554{
 555        switch(version) {
 556        case IPC_64:
 557                return copy_to_user(buf, in, sizeof(*in));
 558        case IPC_OLD:
 559            {
 560                struct semid_ds out;
 561
 562                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
 563
 564                out.sem_otime   = in->sem_otime;
 565                out.sem_ctime   = in->sem_ctime;
 566                out.sem_nsems   = in->sem_nsems;
 567
 568                return copy_to_user(buf, &out, sizeof(out));
 569            }
 570        default:
 571                return -EINVAL;
 572        }
 573}
 574
 575static int semctl_nolock(struct ipc_namespace *ns, int semid,
 576                         int cmd, int version, union semun arg)
 577{
 578        int err = -EINVAL;
 579        struct sem_array *sma;
 580
 581        switch(cmd) {
 582        case IPC_INFO:
 583        case SEM_INFO:
 584        {
 585                struct seminfo seminfo;
 586                int max_id;
 587
 588                err = security_sem_semctl(NULL, cmd);
 589                if (err)
 590                        return err;
 591                
 592                memset(&seminfo,0,sizeof(seminfo));
 593                seminfo.semmni = ns->sc_semmni;
 594                seminfo.semmns = ns->sc_semmns;
 595                seminfo.semmsl = ns->sc_semmsl;
 596                seminfo.semopm = ns->sc_semopm;
 597                seminfo.semvmx = SEMVMX;
 598                seminfo.semmnu = SEMMNU;
 599                seminfo.semmap = SEMMAP;
 600                seminfo.semume = SEMUME;
 601                down_read(&sem_ids(ns).rw_mutex);
 602                if (cmd == SEM_INFO) {
 603                        seminfo.semusz = sem_ids(ns).in_use;
 604                        seminfo.semaem = ns->used_sems;
 605                } else {
 606                        seminfo.semusz = SEMUSZ;
 607                        seminfo.semaem = SEMAEM;
 608                }
 609                max_id = ipc_get_maxid(&sem_ids(ns));
 610                up_read(&sem_ids(ns).rw_mutex);
 611                if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) 
 612                        return -EFAULT;
 613                return (max_id < 0) ? 0: max_id;
 614        }
 615        case IPC_STAT:
 616        case SEM_STAT:
 617        {
 618                struct semid64_ds tbuf;
 619                int id;
 620
 621                if (cmd == SEM_STAT) {
 622                        sma = sem_lock(ns, semid);
 623                        if (IS_ERR(sma))
 624                                return PTR_ERR(sma);
 625                        id = sma->sem_perm.id;
 626                } else {
 627                        sma = sem_lock_check(ns, semid);
 628                        if (IS_ERR(sma))
 629                                return PTR_ERR(sma);
 630                        id = 0;
 631                }
 632
 633                err = -EACCES;
 634                if (ipcperms (&sma->sem_perm, S_IRUGO))
 635                        goto out_unlock;
 636
 637                err = security_sem_semctl(sma, cmd);
 638                if (err)
 639                        goto out_unlock;
 640
 641                memset(&tbuf, 0, sizeof(tbuf));
 642
 643                kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
 644                tbuf.sem_otime  = sma->sem_otime;
 645                tbuf.sem_ctime  = sma->sem_ctime;
 646                tbuf.sem_nsems  = sma->sem_nsems;
 647                sem_unlock(sma);
 648                if (copy_semid_to_user (arg.buf, &tbuf, version))
 649                        return -EFAULT;
 650                return id;
 651        }
 652        default:
 653                return -EINVAL;
 654        }
 655        return err;
 656out_unlock:
 657        sem_unlock(sma);
 658        return err;
 659}
 660
 661static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 662                int cmd, int version, union semun arg)
 663{
 664        struct sem_array *sma;
 665        struct sem* curr;
 666        int err;
 667        ushort fast_sem_io[SEMMSL_FAST];
 668        ushort* sem_io = fast_sem_io;
 669        int nsems;
 670
 671        sma = sem_lock_check(ns, semid);
 672        if (IS_ERR(sma))
 673                return PTR_ERR(sma);
 674
 675        nsems = sma->sem_nsems;
 676
 677        err = -EACCES;
 678        if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
 679                goto out_unlock;
 680
 681        err = security_sem_semctl(sma, cmd);
 682        if (err)
 683                goto out_unlock;
 684
 685        err = -EACCES;
 686        switch (cmd) {
 687        case GETALL:
 688        {
 689                ushort __user *array = arg.array;
 690                int i;
 691
 692                if(nsems > SEMMSL_FAST) {
 693                        sem_getref_and_unlock(sma);
 694
 695                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
 696                        if(sem_io == NULL) {
 697                                sem_putref(sma);
 698                                return -ENOMEM;
 699                        }
 700
 701                        sem_lock_and_putref(sma);
 702                        if (sma->sem_perm.deleted) {
 703                                sem_unlock(sma);
 704                                err = -EIDRM;
 705                                goto out_free;
 706                        }
 707                }
 708
 709                for (i = 0; i < sma->sem_nsems; i++)
 710                        sem_io[i] = sma->sem_base[i].semval;
 711                sem_unlock(sma);
 712                err = 0;
 713                if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
 714                        err = -EFAULT;
 715                goto out_free;
 716        }
 717        case SETALL:
 718        {
 719                int i;
 720                struct sem_undo *un;
 721
 722                sem_getref_and_unlock(sma);
 723
 724                if(nsems > SEMMSL_FAST) {
 725                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
 726                        if(sem_io == NULL) {
 727                                sem_putref(sma);
 728                                return -ENOMEM;
 729                        }
 730                }
 731
 732                if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
 733                        sem_putref(sma);
 734                        err = -EFAULT;
 735                        goto out_free;
 736                }
 737
 738                for (i = 0; i < nsems; i++) {
 739                        if (sem_io[i] > SEMVMX) {
 740                                sem_putref(sma);
 741                                err = -ERANGE;
 742                                goto out_free;
 743                        }
 744                }
 745                sem_lock_and_putref(sma);
 746                if (sma->sem_perm.deleted) {
 747                        sem_unlock(sma);
 748                        err = -EIDRM;
 749                        goto out_free;
 750                }
 751
 752                for (i = 0; i < nsems; i++)
 753                        sma->sem_base[i].semval = sem_io[i];
 754
 755                assert_spin_locked(&sma->sem_perm.lock);
 756                list_for_each_entry(un, &sma->list_id, list_id) {
 757                        for (i = 0; i < nsems; i++)
 758                                un->semadj[i] = 0;
 759                }
 760                sma->sem_ctime = get_seconds();
 761                /* maybe some queued-up processes were waiting for this */
 762                update_queue(sma);
 763                err = 0;
 764                goto out_unlock;
 765        }
 766        /* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
 767        }
 768        err = -EINVAL;
 769        if(semnum < 0 || semnum >= nsems)
 770                goto out_unlock;
 771
 772        curr = &sma->sem_base[semnum];
 773
 774        switch (cmd) {
 775        case GETVAL:
 776                err = curr->semval;
 777                goto out_unlock;
 778        case GETPID:
 779                err = curr->sempid;
 780                goto out_unlock;
 781        case GETNCNT:
 782                err = count_semncnt(sma,semnum);
 783                goto out_unlock;
 784        case GETZCNT:
 785                err = count_semzcnt(sma,semnum);
 786                goto out_unlock;
 787        case SETVAL:
 788        {
 789                int val = arg.val;
 790                struct sem_undo *un;
 791
 792                err = -ERANGE;
 793                if (val > SEMVMX || val < 0)
 794                        goto out_unlock;
 795
 796                assert_spin_locked(&sma->sem_perm.lock);
 797                list_for_each_entry(un, &sma->list_id, list_id)
 798                        un->semadj[semnum] = 0;
 799
 800                curr->semval = val;
 801                curr->sempid = task_tgid_vnr(current);
 802                sma->sem_ctime = get_seconds();
 803                /* maybe some queued-up processes were waiting for this */
 804                update_queue(sma);
 805                err = 0;
 806                goto out_unlock;
 807        }
 808        }
 809out_unlock:
 810        sem_unlock(sma);
 811out_free:
 812        if(sem_io != fast_sem_io)
 813                ipc_free(sem_io, sizeof(ushort)*nsems);
 814        return err;
 815}
 816
 817static inline unsigned long
 818copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
 819{
 820        switch(version) {
 821        case IPC_64:
 822                if (copy_from_user(out, buf, sizeof(*out)))
 823                        return -EFAULT;
 824                return 0;
 825        case IPC_OLD:
 826            {
 827                struct semid_ds tbuf_old;
 828
 829                if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 830                        return -EFAULT;
 831
 832                out->sem_perm.uid       = tbuf_old.sem_perm.uid;
 833                out->sem_perm.gid       = tbuf_old.sem_perm.gid;
 834                out->sem_perm.mode      = tbuf_old.sem_perm.mode;
 835
 836                return 0;
 837            }
 838        default:
 839                return -EINVAL;
 840        }
 841}
 842
 843/*
 844 * This function handles some semctl commands which require the rw_mutex
 845 * to be held in write mode.
 846 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 847 */
 848static int semctl_down(struct ipc_namespace *ns, int semid,
 849                       int cmd, int version, union semun arg)
 850{
 851        struct sem_array *sma;
 852        int err;
 853        struct semid64_ds semid64;
 854        struct kern_ipc_perm *ipcp;
 855
 856        if(cmd == IPC_SET) {
 857                if (copy_semid_from_user(&semid64, arg.buf, version))
 858                        return -EFAULT;
 859        }
 860
 861        ipcp = ipcctl_pre_down(&sem_ids(ns), semid, cmd, &semid64.sem_perm, 0);
 862        if (IS_ERR(ipcp))
 863                return PTR_ERR(ipcp);
 864
 865        sma = container_of(ipcp, struct sem_array, sem_perm);
 866
 867        err = security_sem_semctl(sma, cmd);
 868        if (err)
 869                goto out_unlock;
 870
 871        switch(cmd){
 872        case IPC_RMID:
 873                freeary(ns, ipcp);
 874                goto out_up;
 875        case IPC_SET:
 876                ipc_update_perm(&semid64.sem_perm, ipcp);
 877                sma->sem_ctime = get_seconds();
 878                break;
 879        default:
 880                err = -EINVAL;
 881        }
 882
 883out_unlock:
 884        sem_unlock(sma);
 885out_up:
 886        up_write(&sem_ids(ns).rw_mutex);
 887        return err;
 888}
 889
 890SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
 891{
 892        int err = -EINVAL;
 893        int version;
 894        struct ipc_namespace *ns;
 895
 896        if (semid < 0)
 897                return -EINVAL;
 898
 899        version = ipc_parse_version(&cmd);
 900        ns = current->nsproxy->ipc_ns;
 901
 902        switch(cmd) {
 903        case IPC_INFO:
 904        case SEM_INFO:
 905        case IPC_STAT:
 906        case SEM_STAT:
 907                err = semctl_nolock(ns, semid, cmd, version, arg);
 908                return err;
 909        case GETALL:
 910        case GETVAL:
 911        case GETPID:
 912        case GETNCNT:
 913        case GETZCNT:
 914        case SETVAL:
 915        case SETALL:
 916                err = semctl_main(ns,semid,semnum,cmd,version,arg);
 917                return err;
 918        case IPC_RMID:
 919        case IPC_SET:
 920                err = semctl_down(ns, semid, cmd, version, arg);
 921                return err;
 922        default:
 923                return -EINVAL;
 924        }
 925}
 926#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
 927asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
 928{
 929        return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
 930}
 931SYSCALL_ALIAS(sys_semctl, SyS_semctl);
 932#endif
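
/*
 * User-space note (illustrative sketch, not kernel code): the C library does
 * not define union semun; callers are expected to declare it themselves
 * before using the value-carrying commands dispatched above:
 *
 *      union semun {
 *              int val;
 *              struct semid_ds *buf;
 *              unsigned short *array;
 *      };
 *
 *      union semun arg;
 *      arg.val = 1;
 *      semctl(semid, 0, SETVAL, arg);          // initialise semaphore 0 to 1
 *      int value = semctl(semid, 0, GETVAL);   // read it back
 *      semctl(semid, 0, IPC_RMID);             // destroy the whole set
 */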
 933
 934/* If the task doesn't already have an undo_list, then allocate one
 935 * here.  We guarantee there is only one thread using this undo list,
 936 * and current is THE ONE
 937 *
 938 * If this allocation and assignment succeeds, but later
 939 * portions of this code fail, there is no need to free the sem_undo_list.
 940 * Just let it stay associated with the task, and it'll be freed later
 941 * at exit time.
 942 *
 943 * This can block, so callers must hold no locks.
 944 */
 945static inline int get_undo_list(struct sem_undo_list **undo_listp)
 946{
 947        struct sem_undo_list *undo_list;
 948
 949        undo_list = current->sysvsem.undo_list;
 950        if (!undo_list) {
 951                undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
 952                if (undo_list == NULL)
 953                        return -ENOMEM;
 954                spin_lock_init(&undo_list->lock);
 955                atomic_set(&undo_list->refcnt, 1);
 956                INIT_LIST_HEAD(&undo_list->list_proc);
 957
 958                current->sysvsem.undo_list = undo_list;
 959        }
 960        *undo_listp = undo_list;
 961        return 0;
 962}
 963
 964static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
 965{
 966        struct sem_undo *walk;
 967
 968        list_for_each_entry_rcu(walk, &ulp->list_proc, list_proc) {
 969                if (walk->semid == semid)
 970                        return walk;
 971        }
 972        return NULL;
 973}
 974
 975/**
 976 * find_alloc_undo - Lookup (and if not present create) undo array
 977 * @ns: namespace
 978 * @semid: semaphore array id
 979 *
 980 * The function looks up (and if not present creates) the undo structure.
 981 * The size of the undo structure depends on the size of the semaphore
 982 * array, thus the alloc path is not that straightforward.
 983 * Lifetime rules: sem_undo is rcu-protected; on success, the function
 984 * performs a rcu_read_lock().
 985 */
 986static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 987{
 988        struct sem_array *sma;
 989        struct sem_undo_list *ulp;
 990        struct sem_undo *un, *new;
 991        int nsems;
 992        int error;
 993
 994        error = get_undo_list(&ulp);
 995        if (error)
 996                return ERR_PTR(error);
 997
 998        rcu_read_lock();
 999        spin_lock(&ulp->lock);
1000        un = lookup_undo(ulp, semid);
1001        spin_unlock(&ulp->lock);
1002        if (likely(un!=NULL))
1003                goto out;
1004        rcu_read_unlock();
1005
1006        /* no undo structure around - allocate one. */
1007        /* step 1: figure out the size of the semaphore array */
1008        sma = sem_lock_check(ns, semid);
1009        if (IS_ERR(sma))
1010                return ERR_PTR(PTR_ERR(sma));
1011
1012        nsems = sma->sem_nsems;
1013        sem_getref_and_unlock(sma);
1014
1015        /* step 2: allocate new undo structure */
1016        new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1017        if (!new) {
1018                sem_putref(sma);
1019                return ERR_PTR(-ENOMEM);
1020        }
1021
1022        /* step 3: Acquire the lock on semaphore array */
1023        sem_lock_and_putref(sma);
1024        if (sma->sem_perm.deleted) {
1025                sem_unlock(sma);
1026                kfree(new);
1027                un = ERR_PTR(-EIDRM);
1028                goto out;
1029        }
1030        spin_lock(&ulp->lock);
1031
1032        /*
1033         * step 4: check for races: did someone else allocate the undo struct?
1034         */
1035        un = lookup_undo(ulp, semid);
1036        if (un) {
1037                kfree(new);
1038                goto success;
1039        }
1040        /* step 5: initialize & link new undo structure */
1041        new->semadj = (short *) &new[1];
1042        new->ulp = ulp;
1043        new->semid = semid;
1044        assert_spin_locked(&ulp->lock);
1045        list_add_rcu(&new->list_proc, &ulp->list_proc);
1046        assert_spin_locked(&sma->sem_perm.lock);
1047        list_add(&new->list_id, &sma->list_id);
1048        un = new;
1049
1050success:
1051        spin_unlock(&ulp->lock);
1052        rcu_read_lock();
1053        sem_unlock(sma);
1054out:
1055        return un;
1056}
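
/*
 * Illustrative race that step 4 above guards against: two threads sharing
 * one undo_list both call semop() with SEM_UNDO on the same semid and both
 * miss in lookup_undo(). Whichever links its structure second would
 * otherwise leave two sem_undo entries for one array, so the loser frees
 * its allocation and reuses the winner's entry instead.
 */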
1057
1058SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1059                unsigned, nsops, const struct timespec __user *, timeout)
1060{
1061        int error = -EINVAL;
1062        struct sem_array *sma;
1063        struct sembuf fast_sops[SEMOPM_FAST];
1064        struct sembuf* sops = fast_sops, *sop;
1065        struct sem_undo *un;
1066        int undos = 0, alter = 0, max;
1067        struct sem_queue queue;
1068        unsigned long jiffies_left = 0;
1069        struct ipc_namespace *ns;
1070
1071        ns = current->nsproxy->ipc_ns;
1072
1073        if (nsops < 1 || semid < 0)
1074                return -EINVAL;
1075        if (nsops > ns->sc_semopm)
1076                return -E2BIG;
1077        if(nsops > SEMOPM_FAST) {
1078                sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
1079                if(sops==NULL)
1080                        return -ENOMEM;
1081        }
1082        if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
1083                error=-EFAULT;
1084                goto out_free;
1085        }
1086        if (timeout) {
1087                struct timespec _timeout;
1088                if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
1089                        error = -EFAULT;
1090                        goto out_free;
1091                }
1092                if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
1093                        _timeout.tv_nsec >= 1000000000L) {
1094                        error = -EINVAL;
1095                        goto out_free;
1096                }
1097                jiffies_left = timespec_to_jiffies(&_timeout);
1098        }
1099        max = 0;
1100        for (sop = sops; sop < sops + nsops; sop++) {
1101                if (sop->sem_num >= max)
1102                        max = sop->sem_num;
1103                if (sop->sem_flg & SEM_UNDO)
1104                        undos = 1;
1105                if (sop->sem_op != 0)
1106                        alter = 1;
1107        }
1108
1109        if (undos) {
1110                un = find_alloc_undo(ns, semid);
1111                if (IS_ERR(un)) {
1112                        error = PTR_ERR(un);
1113                        goto out_free;
1114                }
1115        } else
1116                un = NULL;
1117
1118        sma = sem_lock_check(ns, semid);
1119        if (IS_ERR(sma)) {
1120                if (un)
1121                        rcu_read_unlock();
1122                error = PTR_ERR(sma);
1123                goto out_free;
1124        }
1125
1126        /*
1127         * semid identifiers are not unique - find_alloc_undo may have
1128         * allocated an undo structure, it was invalidated by an RMID,
1129         * and now a new array has received the same id. Check and fail.
1130         * This case can be detected by checking un->semid. The existence
1131         * of "un" itself is guaranteed by rcu.
1132         */
1133        error = -EIDRM;
1134        if (un) {
1135                if (un->semid == -1) {
1136                        rcu_read_unlock();
1137                        goto out_unlock_free;
1138                } else {
1139                        /*
1140                         * rcu lock can be released, "un" cannot disappear:
1141                         * - sem_lock is acquired, thus IPC_RMID is
1142                         *   impossible.
1143                         * - exit_sem is impossible, it always operates on
1144                         *   current (or a dead task).
1145                         */
1146
1147                        rcu_read_unlock();
1148                }
1149        }
1150
1151        error = -EFBIG;
1152        if (max >= sma->sem_nsems)
1153                goto out_unlock_free;
1154
1155        error = -EACCES;
1156        if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
1157                goto out_unlock_free;
1158
1159        error = security_sem_semop(sma, sops, nsops, alter);
1160        if (error)
1161                goto out_unlock_free;
1162
1163        error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
1164        if (error <= 0) {
1165                if (alter && error == 0)
1166                        update_queue (sma);
1167                goto out_unlock_free;
1168        }
1169
1170        /* We need to sleep on this operation, so we put the current
1171         * task into the pending queue and go to sleep.
1172         */
1173                
1174        queue.sops = sops;
1175        queue.nsops = nsops;
1176        queue.undo = un;
1177        queue.pid = task_tgid_vnr(current);
1178        queue.alter = alter;
1179        if (alter)
1180                list_add_tail(&queue.list, &sma->sem_pending);
1181        else
1182                list_add(&queue.list, &sma->sem_pending);
1183
1184        queue.status = -EINTR;
1185        queue.sleeper = current;
1186        current->state = TASK_INTERRUPTIBLE;
1187        sem_unlock(sma);
1188
1189        if (timeout)
1190                jiffies_left = schedule_timeout(jiffies_left);
1191        else
1192                schedule();
1193
1194        error = queue.status;
1195        while(unlikely(error == IN_WAKEUP)) {
1196                cpu_relax();
1197                error = queue.status;
1198        }
1199
1200        if (error != -EINTR) {
1201                /* fast path: update_queue already obtained all requested
1202                 * resources */
1203                goto out_free;
1204        }
1205
1206        sma = sem_lock(ns, semid);
1207        if (IS_ERR(sma)) {
1208                error = -EIDRM;
1209                goto out_free;
1210        }
1211
1212        /*
1213         * If queue.status != -EINTR, we were woken up by another process.
1214         */
1215        error = queue.status;
1216        if (error != -EINTR) {
1217                goto out_unlock_free;
1218        }
1219
1220        /*
1221         * If an interrupt occurred we have to clean up the queue
1222         */
1223        if (timeout && jiffies_left == 0)
1224                error = -EAGAIN;
1225        list_del(&queue.list);
1226        goto out_unlock_free;
1227
1228out_unlock_free:
1229        sem_unlock(sma);
1230out_free:
1231        if(sops != fast_sops)
1232                kfree(sops);
1233        return error;
1234}
1235
1236SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
1237                unsigned, nsops)
1238{
1239        return sys_semtimedop(semid, tsops, nsops, NULL);
1240}
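
/*
 * Illustrative user-space sketch of the classic lock/unlock pattern that is
 * serviced by semtimedop() above (identifiers and timeout are made up):
 *
 *      struct sembuf lock   = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *      struct sembuf unlock = { .sem_num = 0, .sem_op = +1, .sem_flg = SEM_UNDO };
 *      struct timespec ts   = { .tv_sec = 5 };
 *
 *      if (semtimedop(semid, &lock, 1, &ts) == -1)
 *              return -1;              // EAGAIN here means the 5s timeout expired
 *      // ... critical section ...
 *      semop(semid, &unlock, 1);
 *
 * SEM_UNDO makes exit_sem() below reverse the adjustment if the task dies
 * while "holding the lock".
 */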
1241
1242/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
1243 * parent and child tasks.
1244 */
1245
1246int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
1247{
1248        struct sem_undo_list *undo_list;
1249        int error;
1250
1251        if (clone_flags & CLONE_SYSVSEM) {
1252                error = get_undo_list(&undo_list);
1253                if (error)
1254                        return error;
1255                atomic_inc(&undo_list->refcnt);
1256                tsk->sysvsem.undo_list = undo_list;
1257        } else 
1258                tsk->sysvsem.undo_list = NULL;
1259
1260        return 0;
1261}
1262
1263/*
1264 * add semadj values to semaphores, free undo structures.
1265 * undo structures are not freed when semaphore arrays are destroyed
1266 * so some of them may be out of date.
1267 * IMPLEMENTATION NOTE: There is some confusion over whether the
1268 * set of adjustments that needs to be done should be done in an atomic
1269 * manner or not. That is, if we are attempting to decrement the semval
1270 * should we queue up and wait until we can do so legally?
1271 * The original implementation attempted to do this (queue and wait).
1272 * The current implementation does not do so. The POSIX standard
1273 * and SVID should be consulted to determine what behavior is mandated.
1274 */
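/*
 * Worked example (illustrative): a task that performed a semop of -1 with
 * SEM_UNDO set left un->semadj[n] == +1 behind (see try_atomic_semop). If it
 * exits without releasing, the loop below adds that +1 back to semval[n], so
 * the "lock" is not leaked; the result is clamped to the [0, SEMVMX] range.
 */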
1275void exit_sem(struct task_struct *tsk)
1276{
1277        struct sem_undo_list *ulp;
1278
1279        ulp = tsk->sysvsem.undo_list;
1280        if (!ulp)
1281                return;
1282        tsk->sysvsem.undo_list = NULL;
1283
1284        if (!atomic_dec_and_test(&ulp->refcnt))
1285                return;
1286
1287        for (;;) {
1288                struct sem_array *sma;
1289                struct sem_undo *un;
1290                int semid;
1291                int i;
1292
1293                rcu_read_lock();
1294                un = list_entry(rcu_dereference(ulp->list_proc.next),
1295                                        struct sem_undo, list_proc);
1296                if (&un->list_proc == &ulp->list_proc)
1297                        semid = -1;
1298                else
1299                        semid = un->semid;
1300                rcu_read_unlock();
1301
1302                if (semid == -1)
1303                        break;
1304
1305                sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
1306
1307                /* exit_sem raced with IPC_RMID, nothing to do */
1308                if (IS_ERR(sma))
1309                        continue;
1310
1311                un = lookup_undo(ulp, semid);
1312                if (un == NULL) {
1313                        /* exit_sem raced with IPC_RMID+semget() that created
1314                         * exactly the same semid. Nothing to do.
1315                         */
1316                        sem_unlock(sma);
1317                        continue;
1318                }
1319
1320                /* remove un from the linked lists */
1321                assert_spin_locked(&sma->sem_perm.lock);
1322                list_del(&un->list_id);
1323
1324                spin_lock(&ulp->lock);
1325                list_del_rcu(&un->list_proc);
1326                spin_unlock(&ulp->lock);
1327
1328                /* perform adjustments registered in un */
1329                for (i = 0; i < sma->sem_nsems; i++) {
1330                        struct sem * semaphore = &sma->sem_base[i];
1331                        if (un->semadj[i]) {
1332                                semaphore->semval += un->semadj[i];
1333                                /*
1334                                 * Range checks of the new semaphore value,
1335                                 * not defined by SUS:
1336                                 * - Some unices ignore the undo entirely
1337                                 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
1338                                 * - some cap the value (e.g. FreeBSD caps
1339                                 *   at 0, but doesn't enforce SEMVMX)
1340                                 *
1341                                 * Linux caps the semaphore value, both at 0
1342                                 * and at SEMVMX.
1343                                 *
1344                                 *      Manfred <manfred@colorfullife.com>
1345                                 */
1346                                if (semaphore->semval < 0)
1347                                        semaphore->semval = 0;
1348                                if (semaphore->semval > SEMVMX)
1349                                        semaphore->semval = SEMVMX;
1350                                semaphore->sempid = task_tgid_vnr(current);
1351                        }
1352                }
1353                sma->sem_otime = get_seconds();
1354                /* maybe some queued-up processes were waiting for this */
1355                update_queue(sma);
1356                sem_unlock(sma);
1357
1358                call_rcu(&un->rcu, free_un);
1359        }
1360        kfree(ulp);
1361}
1362
1363#ifdef CONFIG_PROC_FS
1364static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
1365{
1366        struct sem_array *sma = it;
1367
1368        return seq_printf(s,
1369                          "%10d %10d  %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
1370                          sma->sem_perm.key,
1371                          sma->sem_perm.id,
1372                          sma->sem_perm.mode,
1373                          sma->sem_nsems,
1374                          sma->sem_perm.uid,
1375                          sma->sem_perm.gid,
1376                          sma->sem_perm.cuid,
1377                          sma->sem_perm.cgid,
1378                          sma->sem_otime,
1379                          sma->sem_ctime);
1380}
1381#endif
1382