1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76#include <linux/slab.h>
77#include <linux/spinlock.h>
78#include <linux/init.h>
79#include <linux/proc_fs.h>
80#include <linux/time.h>
81#include <linux/security.h>
82#include <linux/syscalls.h>
83#include <linux/audit.h>
84#include <linux/capability.h>
85#include <linux/seq_file.h>
86#include <linux/rwsem.h>
87#include <linux/nsproxy.h>
88#include <linux/ipc_namespace.h>
89
90#include <asm/uaccess.h>
91#include "util.h"
92
93
/* One semaphore structure for each semaphore in the system. */
struct sem {
	int semval;			/* current value */
	int sempid;			/* pid of last operation */
	struct list_head sem_pending;	/* pending single-sop operations */
};
99
100
/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head simple_list;	/* queue of pending operations */
	struct list_head list;		/* queue of pending operations */
	struct task_struct *sleeper;	/* this process */
	struct sem_undo *undo;		/* undo structure */
	int pid;			/* process id of requesting process */
	int status;			/* completion status of operation */
	struct sembuf *sops;		/* array of pending operations */
	int nsops;			/* number of operations */
	int alter;			/* does *sops alter the array? */
};
112
113
114
115
/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head list_proc;	/* per-process list: all undos from
					 * one process; rcu protected */
	struct rcu_head rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list *ulp;	/* back ptr to sem_undo_list */
	struct list_head list_id;	/* per semaphore array list:
					 * all undos for one array */
	int semid;			/* semaphore set identifier;
					 * -1 once the set was removed */
	short *semadj;			/* array of adjustments,
					 * one per semaphore */
};
128
129
130
131
/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	atomic_t refcnt;		/* tasks sharing this list */
	spinlock_t lock;		/* protects list_proc mutations */
	struct list_head list_proc;	/* list of sem_undo structures */
};
137
138
139#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
140
141#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
142#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
143
144static int newary(struct ipc_namespace *, struct ipc_params *);
145static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
146#ifdef CONFIG_PROC_FS
147static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
148#endif
149
150#define SEMMSL_FAST 256
151#define SEMOPM_FAST 64
152
153
154
155
156
157
158
159
160
161
162#define sc_semmsl sem_ctls[0]
163#define sc_semmns sem_ctls[1]
164#define sc_semopm sem_ctls[2]
165#define sc_semmni sem_ctls[3]
166
/*
 * sem_init_ns - initialize the semaphore limits and id machinery of a
 * freshly created ipc namespace.
 */
void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}
176
#ifdef CONFIG_IPC_NS
/*
 * sem_exit_ns - destroy every semaphore set of a dying ipc namespace
 * and release the idr that backed its id allocation.
 */
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif
184
/*
 * sem_init - boot-time setup: initialize the initial ipc namespace and
 * register the /proc/sysvipc/sem interface.
 */
void __init sem_init (void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}
192
193
194
195
196
197static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
198{
199 struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);
200
201 if (IS_ERR(ipcp))
202 return (struct sem_array *)ipcp;
203
204 return container_of(ipcp, struct sem_array, sem_perm);
205}
206
207static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
208 int id)
209{
210 struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
211
212 if (IS_ERR(ipcp))
213 return (struct sem_array *)ipcp;
214
215 return container_of(ipcp, struct sem_array, sem_perm);
216}
217
/* Re-acquire the set's lock and drop the reference taken earlier with
 * sem_getref_and_unlock(). Caller must still check sem_perm.deleted.
 */
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
}
223
/* Take a reference on the set so it survives, then drop its lock.
 * Used to pin the array across a sleeping allocation.
 */
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
	ipc_rcu_getref(sma);
	ipc_unlock(&(sma)->sem_perm);
}
229
/* Drop a reference taken with sem_getref_and_unlock() when the caller
 * no longer needs the array (lock/put/unlock so the putref is safe).
 */
static inline void sem_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
	ipc_unlock(&(sma)->sem_perm);
}
236
/* Remove the set's id from the namespace's id table. */
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274#define IN_WAKEUP 1
275
276
277
278
279
280
281
282
283
/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rw_mutex held (as a writer).
 * Returns the new set's ipc id, or a negative errno.
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	/* the individual sems are allocated in the same block, after *sma */
	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma) {
		return -ENOMEM;
	}
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma);
		return retval;
	}

	/* ipc_addid() locks sma on success */
	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		security_sem_free(sma);
		ipc_rcu_putref(sma);
		return id;
	}
	ns->used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++)
		INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);

	sma->complex_count = 0;
	INIT_LIST_HEAD(&sma->sem_pending);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sma->sem_perm.id;
}
339
340
341
342
343
344static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
345{
346 struct sem_array *sma;
347
348 sma = container_of(ipcp, struct sem_array, sem_perm);
349 return security_sem_associate(sma, semflg);
350}
351
352
353
354
355static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
356 struct ipc_params *params)
357{
358 struct sem_array *sma;
359
360 sma = container_of(ipcp, struct sem_array, sem_perm);
361 if (params->u.nsems > sma->sem_nsems)
362 return -EINVAL;
363
364 return 0;
365}
366
/*
 * semget(2) - find or create a semaphore set for (key, nsems, semflg).
 * Creation is delegated to newary(); lookup checks go through
 * sem_security() and sem_more_checks() via the generic ipcget() helper.
 */
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops sem_ops;
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_ops.getnew = newary;
	sem_ops.associate = sem_security;
	sem_ops.more_checks = sem_more_checks;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
388
389
390
391
392
393
/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
 *
 * On success the semvals are updated and sempid/semadj recorded; on
 * failure every partial update is rolled back before returning.
 * Caller holds the semaphore array lock.
 */
static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		/* wait-for-zero op blocks while the value is non-zero */
		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
	 		 *	Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	/* All ops succeeded: record pids and undo adjustments. */
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	/* the failing sop never wrote semval, so roll back from sop-1 */
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
454
455
456
457
458
459
460
/** wake_up_sem_queue_prepare(q, error): Prepare wake-up
 * @pt: list head of tasks that will be woken up
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q. The actual wake-up is
 * performed later by wake_up_sem_queue_do(), after all locks are dropped.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
	if (list_empty(pt)) {
		/*
		 * Hold preempt off so that we don't get preempted and have
		 * the wakee busy-wait until we're scheduled back on (the
		 * waiter spins on q->status while it reads IN_WAKEUP).
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	q->pid = error;		/* final status is parked in q->pid */

	list_add_tail(&q->simple_list, pt);
}
476
477
478
479
480
481
482
483
484
485
/**
 * wake_up_sem_queue_do(pt) - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up.
 * The function is called without any locks held, thus the semaphore array
 * lock must not be held.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, simple_list) {
		wake_up_process(q->sleeper);
		/* q can disappear immediately after writing q->status. */
		smp_wmb();
		q->status = q->pid;
	}
	if (did_something)
		preempt_enable();	/* balances wake_up_sem_queue_prepare() */
}
501
502static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
503{
504 list_del(&q->list);
505 if (q->nsops == 1)
506 list_del(&q->simple_list);
507 else
508 sma->complex_count--;
509}
510
511
512
513
514
515
516
517
518
519
/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * was completed.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	struct sem *curr;
	struct sem_queue *h;

	/* if the operation didn't modify the array, then no restart */
	if (q->alter == 0)
		return 0;

	/* pending complex operations are too difficult to analyse */
	if (sma->complex_count)
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	curr = sma->sem_base + q->sops[0].sem_num;

	/* No-one waits on this queue */
	if (list_empty(&curr->sem_pending))
		return 0;

	/* the new semaphore value */
	if (curr->semval) {
		/* It is impossible that someone waits for the new value:
		 * - q is a previously sleeping simple operation that
		 *   altered the array. It must be a decrement, because
		 *   simple increments never sleep.
		 * - The value is not 0, thus wait-for-zero won't proceed.
		 * - If there are older (higher priority) decrements
		 *   in the queue, then they have observed the original
		 *   semval value and couldn't proceed. The operation
		 *   decremented to value - thus they won't proceed either.
		 */
		BUG_ON(q->sops[0].sem_op >= 0);
		return 0;
	}
	/*
	 * semval is 0. Check if there are wait-for-zero semops.
	 * They must be the first entries in the per-semaphore queue
	 */
	h = list_first_entry(&curr->sem_pending, struct sem_queue, simple_list);
	BUG_ON(h->nsops != 1);
	BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);

	/* Yes, there is a wait-for-zero semop. Restart */
	if (h->sops[0].sem_op == 0)
		return 1;

	/* Again - no-one is waiting for the new value. */
	return 0;
}
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
/**
 * update_queue(sma, semnum): Look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 to scan everything.
 * @pt: list head for the tasks that must be woken up.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int offset;
	int semop_completed = 0;

	/* if there are complex operations around, then knowing the semaphore
	 * that was modified doesn't help us. Assume that multiple semaphores
	 * were modified.
	 */
	if (sma->complex_count)
		semnum = -1;

	if (semnum == -1) {
		pending_list = &sma->sem_pending;
		offset = offsetof(struct sem_queue, list);
	} else {
		pending_list = &sma->sem_base[semnum].sem_pending;
		offset = offsetof(struct sem_queue, simple_list);
	}

again:
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, restart;

		q = (struct sem_queue *)((char *)walk - offset);
		walk = walk->next;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan the "alter" entries: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per-semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
				q->alter)
			break;

		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(pt, q, error);
		if (restart)
			goto again;
	}
	return semop_completed;
}
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
/**
 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() does the required calls to update_queue, based on the
 * actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_sem_queue_do(@pt).
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			int otime, struct list_head *pt)
{
	int i;

	if (sma->complex_count || sops == NULL) {
		if (update_queue(sma, -1, pt))
			otime = 1;
		goto done;
	}

	for (i = 0; i < nsops; i++) {
		/* only increments and decrements-to-zero can unblock waiters */
		if (sops[i].sem_op > 0 ||
			(sops[i].sem_op < 0 &&
				sma->sem_base[sops[i].sem_num].semval == 0))
			if (update_queue(sma, sops[i].sem_num, pt))
				otime = 1;
	}
done:
	if (otime)
		sma->sem_otime = get_seconds();
}
689
690
691
692
693
694
695
696
697
698
699
700static int count_semncnt (struct sem_array * sma, ushort semnum)
701{
702 int semncnt;
703 struct sem_queue * q;
704
705 semncnt = 0;
706 list_for_each_entry(q, &sma->sem_pending, list) {
707 struct sembuf * sops = q->sops;
708 int nsops = q->nsops;
709 int i;
710 for (i = 0; i < nsops; i++)
711 if (sops[i].sem_num == semnum
712 && (sops[i].sem_op < 0)
713 && !(sops[i].sem_flg & IPC_NOWAIT))
714 semncnt++;
715 }
716 return semncnt;
717}
718
719static int count_semzcnt (struct sem_array * sma, ushort semnum)
720{
721 int semzcnt;
722 struct sem_queue * q;
723
724 semzcnt = 0;
725 list_for_each_entry(q, &sma->sem_pending, list) {
726 struct sembuf * sops = q->sops;
727 int nsops = q->nsops;
728 int i;
729 for (i = 0; i < nsops; i++)
730 if (sops[i].sem_num == semnum
731 && (sops[i].sem_op == 0)
732 && !(sops[i].sem_flg & IPC_NOWAIT))
733 semzcnt++;
734 }
735 return semzcnt;
736}
737
738
739
740
741
/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	struct list_head tasks;

	/* Free the existing undo structures for this semaphore set. */
	assert_spin_locked(&sma->sem_perm.lock);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;		/* mark stale for semtimedop/exit_sem */
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	INIT_LIST_HEAD(&tasks);
	list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma);

	wake_up_sem_queue_do(&tasks);
	ns->used_sems -= sma->sem_nsems;
	security_sem_free(sma);
	ipc_rcu_putref(sma);
}
776
/*
 * copy_semid_to_user - copy a semid64_ds to user space, converting to the
 * legacy semid_ds layout for IPC_OLD callers.
 * Returns the number of bytes not copied (as copy_to_user does), or
 * -EINVAL for an unknown version.
 */
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
800
/*
 * semctl_nolock - handle the semctl commands that need no semaphore
 * array lock on entry: IPC_INFO/SEM_INFO (namespace-wide limits and
 * usage) and IPC_STAT/SEM_STAT (per-set status).
 * Returns max_id (INFO), the set's id or 0 (STAT), or a negative errno.
 */
static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, union semun arg)
{
	int err;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rw_mutex);
		if (cmd == SEM_INFO) {
			/* SEM_INFO reports actual usage, IPC_INFO limits */
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rw_mutex);
		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0: max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		if (cmd == SEM_STAT) {
			/* SEM_STAT takes an index, returns the ipc id */
			sma = sem_lock(ns, semid);
			if (IS_ERR(sma))
				return PTR_ERR(sma);
			id = sma->sem_perm.id;
		} else {
			sma = sem_lock_check(ns, semid);
			if (IS_ERR(sma))
				return PTR_ERR(sma);
			id = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime  = sma->sem_otime;
		tbuf.sem_ctime  = sma->sem_ctime;
		tbuf.sem_nsems  = sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	sem_unlock(sma);
	return err;
}
885
/*
 * semctl_main - handle the per-set semctl commands: GETALL/SETALL and
 * the single-semaphore GETVAL/GETPID/GETNCNT/GETZCNT/SETVAL.
 *
 * GETALL/SETALL may need a buffer larger than SEMMSL_FAST; the set's
 * lock must be dropped across the sleeping allocation, so a reference
 * is taken (sem_getref_and_unlock) and sem_perm.deleted is rechecked
 * after relocking (sem_lock_and_putref).
 */
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem* curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort* sem_io = fast_sem_io;
	int nsems;
	struct list_head tasks;

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return PTR_ERR(sma);

	INIT_LIST_HEAD(&tasks);
	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm,
			(cmd == SETVAL || cmd == SETALL) ? S_IWUGO : S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = arg.array;
		int i;

		if(nsems > SEMMSL_FAST) {
			/* drop the lock (but keep a reference) to allocate */
			sem_getref_and_unlock(sma);

			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				sem_putref(sma);
				return -ENOMEM;
			}

			sem_lock_and_putref(sma);
			if (sma->sem_perm.deleted) {
				/* raced with IPC_RMID while unlocked */
				sem_unlock(sma);
				err = -EIDRM;
				goto out_free;
			}
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		sem_getref_and_unlock(sma);

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				sem_putref(sma);
				return -ENOMEM;
			}
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			sem_putref(sma);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				sem_putref(sma);
				err = -ERANGE;
				goto out_free;
			}
		}
		sem_lock_and_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma);
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];

		/* clear every registered undo adjustment for this set */
		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if(semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;

		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id)
			un->semadj[semnum] = 0;

		curr->semval = val;
		curr->sempid = task_tgid_vnr(current);
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(sma);
	wake_up_sem_queue_do(&tasks);

out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
1046
/*
 * copy_semid_from_user - read a semid64_ds from user space, converting
 * from the legacy semid_ds layout for IPC_OLD callers (only the
 * permission fields are meaningful for IPC_SET).
 * Returns 0, -EFAULT, or -EINVAL for an unknown version.
 */
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
1072
1073
1074
1075
1076
1077
/*
 * This function handles some semctl commands which require the rw_mutex
 * to be held in write mode:
 * NOTE: no locks must be held, the rw_mutex is taken inside this function
 * (by ipcctl_pre_down), which also performs the ownership/capability check.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, arg.buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(ns, &sem_ids(ns), semid, cmd,
			       &semid64.sem_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch(cmd){
	case IPC_RMID:
		/* freeary() unlocks the set, so skip out_unlock */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		err = ipc_update_perm(&semid64.sem_perm, ipcp);
		if (err)
			goto out_unlock;
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
	}

out_unlock:
	sem_unlock(sma);
out_up:
	up_write(&sem_ids(ns).rw_mutex);
	return err;
}
1122
/*
 * semctl(2) - dispatch the command to the handler with the appropriate
 * locking strategy: semctl_nolock (INFO/STAT), semctl_main (per-set
 * value commands), or semctl_down (RMID/SET, needs rw_mutex as writer).
 */
SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;
	struct ipc_namespace *ns;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		err = semctl_nolock(ns, semid, cmd, version, arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETVAL:
	case SETALL:
		err = semctl_main(ns,semid,semnum,cmd,version,arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		err = semctl_down(ns, semid, cmd, version, arg);
		return err;
	default:
		return -EINVAL;
	}
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
/* Thin alias so the syscall-wrapper machinery can forward to SYSC_semctl. */
asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
{
	return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
}
SYSCALL_ALIAS(sys_semctl, SyS_semctl);
#endif
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
/* If the task doesn't already have a undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
1196
1197static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1198{
1199 struct sem_undo *un;
1200
1201 list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1202 if (un->semid == semid)
1203 return un;
1204 }
1205 return NULL;
1206}
1207
/*
 * lookup_undo - __lookup_undo plus a move-to-front: a hit is rotated to
 * the head of the list so frequently used entries are found quickly.
 * Caller must hold ulp->lock.
 */
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
/**
 * find_alloc_undo - Lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un!=NULL))
		goto out;
	rcu_read_unlock();

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return ERR_CAST(sma);

	nsems = sma->sem_nsems;
	sem_getref_and_unlock(sma);

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		sem_putref(sma);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	sem_lock_and_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo
	 * struct while we slept?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	assert_spin_locked(&sma->sem_perm.lock);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	rcu_read_lock();
	sem_unlock(sma);
out:
	return un;
}
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
/**
 * get_queue_result - Retrieve the result code from sem_queue
 * @q: Pointer to queue structure
 *
 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
 * q->status, then we must loop until the value is replaced with the final
 * value: This may happen if a task is woken up by an unrelated event (e.g.
 * signal) and in parallel the task is woken up by another task because it got
 * the requested semaphores.
 *
 * The function can be called with or without holding the semaphore spinlock.
 */
static int get_queue_result(struct sem_queue *q)
{
	int error;

	error = q->status;
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = q->status;
	}

	return error;
}
1330
1331
/*
 * semtimedop(2) - perform nsops semaphore operations atomically, with an
 * optional timeout. If the operations cannot all be performed at once the
 * task is queued on the set and sleeps until woken (success, EIDRM, signal,
 * or timeout).
 */
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf* sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;
	struct list_head tasks;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error=-EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

	if (undos) {
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else
		un = NULL;

	INIT_LIST_HEAD(&tasks);

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma)) {
		if (un)
			rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array with received the same id. Check and fail.
	 * This case can be detected checking un->semid. The existence of
	 * "un" itself is guaranteed by rcu.
	 */
	error = -EIDRM;
	if (un) {
		if (un->semid == -1) {
			rcu_read_unlock();
			goto out_unlock_free;
		} else {
			/*
			 * rcu lock can be released, "un" cannot disappear:
			 * - sem_lock is acquired, thus IPC_RMID is
			 *   impossible.
			 * - exit_sem is impossible, it always operates on
			 *   current (or a dead task).
			 */
			rcu_read_unlock();
		}
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_unlock_free;

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_unlock_free;

	error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
	if (error <= 0) {
		if (alter && error == 0)
			do_smart_update(sma, sops, nsops, 1, &tasks);

		goto out_unlock_free;
	}

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;
	if (alter)
		list_add_tail(&queue.list, &sma->sem_pending);
	else
		list_add(&queue.list, &sma->sem_pending);

	if (nsops == 1) {
		struct sem *curr;
		curr = &sma->sem_base[sops->sem_num];

		if (alter)
			list_add_tail(&queue.simple_list, &curr->sem_pending);
		else
			list_add(&queue.simple_list, &curr->sem_pending);
	} else {
		INIT_LIST_HEAD(&queue.simple_list);
		sma->complex_count++;
	}

	queue.status = -EINTR;
	queue.sleeper = current;

sleep_again:
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma);

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = get_queue_result(&queue);

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources.
		 * Perform a smp_mb(): User space could assume that semop()
		 * is a memory barrier: Without the mb(), the cpu could
		 * speculatively read in user space stale data that was
		 * overwritten by the previous owner of the semaphore.
		 */
		smp_mb();

		goto out_free;
	}

	sma = sem_lock(ns, semid);

	/*
	 * Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
	 */
	error = get_queue_result(&queue);

	/*
	 * Array removed? If yes, leave without sem_unlock().
	 */
	if (IS_ERR(sma)) {
		goto out_free;
	}


	/*
	 * If queue.status != -EINTR we are woken up by another process.
	 * Leave without unlink_queue(), but with sem_unlock().
	 */

	if (error != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * If an interrupt occurred we have to clean up the queue
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;

	/*
	 * If the wakeup was spurious, just retry
	 */
	if (error == -EINTR && !signal_pending(current))
		goto sleep_again;

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma);

	wake_up_sem_queue_do(&tasks);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}
1549
/* semop(2) is semtimedop(2) without a timeout. */
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
1555
1556
1557
1558
1559
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 *
 * See the notes above unlock_semundo() regarding the spin_lock_init()
 * in this code. Initialize the undo_list when first allocated.
 */
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589void exit_sem(struct task_struct *tsk)
1590{
1591 struct sem_undo_list *ulp;
1592
1593 ulp = tsk->sysvsem.undo_list;
1594 if (!ulp)
1595 return;
1596 tsk->sysvsem.undo_list = NULL;
1597
1598 if (!atomic_dec_and_test(&ulp->refcnt))
1599 return;
1600
1601 for (;;) {
1602 struct sem_array *sma;
1603 struct sem_undo *un;
1604 struct list_head tasks;
1605 int semid;
1606 int i;
1607
1608 rcu_read_lock();
1609 un = list_entry_rcu(ulp->list_proc.next,
1610 struct sem_undo, list_proc);
1611 if (&un->list_proc == &ulp->list_proc)
1612 semid = -1;
1613 else
1614 semid = un->semid;
1615 rcu_read_unlock();
1616
1617 if (semid == -1)
1618 break;
1619
1620 sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
1621
1622
1623 if (IS_ERR(sma))
1624 continue;
1625
1626 un = __lookup_undo(ulp, semid);
1627 if (un == NULL) {
1628
1629
1630
1631 sem_unlock(sma);
1632 continue;
1633 }
1634
1635
1636 assert_spin_locked(&sma->sem_perm.lock);
1637 list_del(&un->list_id);
1638
1639 spin_lock(&ulp->lock);
1640 list_del_rcu(&un->list_proc);
1641 spin_unlock(&ulp->lock);
1642
1643
1644 for (i = 0; i < sma->sem_nsems; i++) {
1645 struct sem * semaphore = &sma->sem_base[i];
1646 if (un->semadj[i]) {
1647 semaphore->semval += un->semadj[i];
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661 if (semaphore->semval < 0)
1662 semaphore->semval = 0;
1663 if (semaphore->semval > SEMVMX)
1664 semaphore->semval = SEMVMX;
1665 semaphore->sempid = task_tgid_vnr(current);
1666 }
1667 }
1668
1669 INIT_LIST_HEAD(&tasks);
1670 do_smart_update(sma, NULL, 0, 1, &tasks);
1671 sem_unlock(sma);
1672 wake_up_sem_queue_do(&tasks);
1673
1674 kfree_rcu(un, rcu);
1675 }
1676 kfree(ulp);
1677}
1678
#ifdef CONFIG_PROC_FS
/*
 * One line of /proc/sysvipc/sem per semaphore set; the column layout
 * matches the header registered in sem_init(). uids/gids are translated
 * into the reading process's user namespace.
 */
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct sem_array *sma = it;

	return seq_printf(s,
			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  from_kuid_munged(user_ns, sma->sem_perm.uid),
			  from_kgid_munged(user_ns, sma->sem_perm.gid),
			  from_kuid_munged(user_ns, sma->sem_perm.cuid),
			  from_kgid_munged(user_ns, sma->sem_perm.cgid),
			  sma->sem_otime,
			  sma->sem_ctime);
}
#endif
1699