1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76#include <linux/slab.h>
77#include <linux/spinlock.h>
78#include <linux/init.h>
79#include <linux/proc_fs.h>
80#include <linux/time.h>
81#include <linux/security.h>
82#include <linux/syscalls.h>
83#include <linux/audit.h>
84#include <linux/capability.h>
85#include <linux/seq_file.h>
86#include <linux/rwsem.h>
87#include <linux/nsproxy.h>
88#include <linux/ipc_namespace.h>
89
90#include <asm/uaccess.h>
91#include "util.h"
92
93
/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	int	sempid;		/* pid of last operation */
	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
	struct list_head pending_alter; /* pending single-sop operations
					 * that alter the semaphore */
	struct list_head pending_const; /* pending single-sop operations
					 * that do not alter the semaphore */
	time_t	sem_otime;	/* candidate for sem_otime (replicated
				 * per-semaphore to avoid cacheline trashing;
				 * see get_semotime()) */
} ____cacheline_aligned_in_smp;
104
105
/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	int			pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	int			nsops;	 /* number of operations */
	int			alter;	 /* does *sops alter the array? */
};
116
117
118
119
/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: all
						 * undos from one process.
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments,
						 * one per semaphore */
};
132
133
134
135
/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	atomic_t		refcnt;		/* shared by CLONE_SYSVSEM tasks */
	spinlock_t		lock;		/* protects list_proc */
	struct list_head	list_proc;	/* all sem_undos of this group */
};
141
142
/* The ipc_ids instance holding all semaphore sets of namespace @ns. */
#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

/* Verify that @semid's sequence number matches the stored permission. */
#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

/* Up to these sizes, semctl_main()/semtimedop() use an on-stack buffer
 * instead of a heap allocation. */
#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
155
156
157
158
159
160
161
162
163
164
165
166
167
/*
 * Per-namespace tunables, stored in ns->sem_ctls[]:
 *	sc_semmsl - max semaphores per array
 *	sc_semmns - max semaphores system wide
 *	sc_semopm - max ops per semop call
 *	sc_semmni - max number of semaphore arrays
 */
#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]
172
/* Initialise the semaphore limits and the id allocator of namespace @ns. */
void sem_init_ns(struct ipc_namespace *ns)
{
	/* Defaults come from the compile-time SEM* constants. */
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}
182
183#ifdef CONFIG_IPC_NS
/* Tear down all semaphore sets of a dying ipc namespace.
 * Every remaining array is destroyed via freeary() before the
 * id radix tree itself is released.
 */
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
189#endif
190
/* Boot-time initialisation: set up the initial namespace and register
 * the /proc/sysvipc/sem interface.
 */
void __init sem_init (void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				" key semid perms nsems uid gid cuid cgid otime ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}
198
199
200
201
202
203
204
205
/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? - keep them merged. */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operation back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		/* only single-sop operations are on this list here */
		curr = &sma->sem_base[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}
226
227
228
229
230
231
232
233
234
235
236static void merge_queues(struct sem_array *sma)
237{
238 int i;
239 for (i = 0; i < sma->sem_nsems; i++) {
240 struct sem *sem = sma->sem_base + i;
241
242 list_splice_init(&sem->pending_alter, &sma->pending_alter);
243 }
244}
245
246
247
248
249
250
251
252
/*
 * Wait until all currently ongoing simple ops have completed.
 * Caller must own sem_perm.lock.
 * New simple ops cannot start, because simple ops first check
 * that sem_perm.lock is free.
 */
static void sem_wait_array(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->complex_count) {
		/* The thread that increased sma->complex_count waited on
		 * all sem->lock locks. Thus we don't need to wait again.
		 */
		return;
	}

	for (i = 0; i < sma->sem_nsems; i++) {
		sem = sma->sem_base + i;
		spin_unlock_wait(&sem->lock);
	}
}
270
/* RCU callback: release the LSM security blob, then the array itself.
 * Used whenever the security blob was already allocated; early failure
 * paths that never called security_sem_alloc() use plain ipc_rcu_free.
 */
static void sem_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct sem_array *sma = ipc_rcu_to_struct(p);

	security_sem_free(sma);
	ipc_rcu_free(head);
}
279
280
281
282
283
284
285
286
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 *
 * Returns the index of the locked per-semaphore lock, or -1 when the
 * whole array was locked; the caller passes this to sem_unlock().
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* And wait until all simple ops that are processed
		 * right now have dropped their locks.
		 */
		sem_wait_array(sma);
		return -1;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 *
	 * Optimized locking is possible if no complex operation
	 * is either enqueued or processed right now.
	 *
	 * Both facts are tracked by complex_count.
	 */
	sem = sma->sem_base + sops->sem_num;

	/*
	 * Initial check for complex_count. Just an optimization,
	 * no locking, no memory barrier.
	 */
	if (sma->complex_count == 0) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* Then check that the global lock is free */
		if (!spin_is_locked(&sma->sem_perm.lock)) {
			/* spin_is_locked() is not a memory barrier */
			smp_mb();

			/* Now repeat the test of complex_count:
			 * It can't change anymore until we drop sem->lock.
			 * Thus: if it is now 0, then it will stay 0.
			 */
			if (sma->complex_count == 0) {
				/* fast path successful! */
				return sops->sem_num;
			}
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->complex_count == 0) {
		/* False alarm:
		 * There is no complex operation, thus we can switch
		 * back to the fast path.
		 */
		spin_lock(&sem->lock);
		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/* Not a false alarm, thus complete the sequence for a
		 * full lock.
		 */
		sem_wait_array(sma);
		return -1;
	}
}
363
/* Counterpart of sem_lock(): @locknum is the value sem_lock() returned.
 * -1 means the global array lock is held; any other value is the index
 * of the per-semaphore lock to drop.
 */
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == -1) {
		/* switch back to per-semaphore queues if possible */
		unmerge_queues(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = sma->sem_base + locknum;
		spin_unlock(&sem->lock);
	}
}
374
375
376
377
378
379
380
/*
 * Look up a semaphore set by id and lock it (fine-grained, based on
 * the operation that will be performed).
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
			int id, struct sembuf *sops, int nsops, int *locknum)
{
	struct kern_ipc_perm *ipcp;
	struct sem_array *sma;

	ipcp = ipc_obtain_object(&sem_ids(ns), id);
	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);
	*locknum = sem_lock(sma, sops, nsops);

	/* ipc_rmid() may have already freed the ID while sem_lock
	 * was spinning: verify that the structure is still valid.
	 */
	if (!ipcp->deleted)
		return container_of(ipcp, struct sem_array, sem_perm);

	sem_unlock(sma, *locknum);
	return ERR_PTR(-EINVAL);
}
403
404static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
405{
406 struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);
407
408 if (IS_ERR(ipcp))
409 return ERR_CAST(ipcp);
410
411 return container_of(ipcp, struct sem_array, sem_perm);
412}
413
414static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
415 int id)
416{
417 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
418
419 if (IS_ERR(ipcp))
420 return ERR_CAST(ipcp);
421
422 return container_of(ipcp, struct sem_array, sem_perm);
423}
424
/* Re-acquire the full array lock and drop the extra reference that the
 * caller took (via ipc_rcu_getref) while temporarily unlocked.
 * Caller holds the RCU read lock and must check sem_perm.deleted afterwards.
 */
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(sma, ipc_rcu_free);
}
430
/* Remove the array from the id table; the memory is freed later via RCU. */
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
#define IN_WAKEUP 1 /* transient q->status while a wakeup is in progress;
		     * readers must spin until the final value is stored
		     * (see get_queue_result()) */
469
470
471
472
473
474
475
476
477
/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer).
 * Returns the ipc id of the new set, or a negative errno.
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	/* the struct sem entries are appended directly after the header */
	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma) {
		return -ENOMEM;
	}
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		/* no security blob allocated -> plain ipc_rcu_free */
		ipc_rcu_putref(sma, ipc_rcu_free);
		return retval;
	}

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		/* security blob exists -> sem_rcu_free releases it too */
		ipc_rcu_putref(sma, sem_rcu_free);
		return id;
	}
	ns->used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
		INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
		spin_lock_init(&sma->sem_base[i].lock);
	}

	sma->complex_count = 0;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	/* ipc_addid() locked the object and took the RCU read lock */
	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}
537
538
539
540
541
/*
 * Called with sem_ids.rwsem and ipcp locked.
 * LSM hook for semget() on an existing set.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}
549
550
551
552
/*
 * Called with sem_ids.rwsem and ipcp locked.
 * semget() on an existing set: the requested size may not exceed
 * the size of the existing array.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}
564
/* semget(2): find or create a semaphore set. The heavy lifting
 * (key lookup, creation, permission checks) is shared with the other
 * SysV IPC mechanisms via ipcget().
 */
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops sem_ops;
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_ops.getnew = newary;
	sem_ops.associate = sem_security;
	sem_ops.more_checks = sem_more_checks;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
586
587
588
589
590
591
592
593
594
595
596
597
598
/**
 * perform_atomic_semop - attempt to perform a semaphore operation atomically
 * @sma: semaphore array
 * @sops: array with operations that should be checked
 * @nsops: number of operations
 * @un: undo array
 * @pid: pid that did the change
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Negative values are error codes (-ERANGE / -EAGAIN).
 *
 * Caller holds the appropriate semaphore lock(s).
 */
static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	/* first pass: apply the values, bailing out at the first op
	 * that cannot proceed */
	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		/* wait-for-zero op on a nonzero semaphore blocks */
		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/* the adjustment value must stay representable */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	/* second pass (all ops succeeded): record pids and undo values */
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	/* roll back the semval changes made so far, in reverse order */
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
659
660
661
662
663
664
665
/**
 * wake_up_sem_queue_prepare - prepare wake-up
 * @pt: list of tasks to be woken up
 * @q: queue entry that must be signaled
 * @error: error value for the signal
 *
 * Prepare the wake-up of the queue entry q. The actual wake-up is
 * performed later, by wake_up_sem_queue_do(), after all locks dropped.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
	if (list_empty(pt)) {
		/*
		 * Hold preempt off so that we don't get preempted and have
		 * the wakee busy-wait until we're scheduled back on.
		 * Re-enabled in wake_up_sem_queue_do().
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	/* stash the result code in q->pid until the final store */
	q->pid = error;

	list_add_tail(&q->list, pt);
}
681
682
683
684
685
686
687
688
689
690
/**
 * wake_up_sem_queue_do - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up.
 * The function is called without any locks held, thus the semaphore array
 * could be destroyed already and the tasks can disappear as soon as the
 * status is set to the actual return code.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, list) {
		wake_up_process(q->sleeper);
		/* q can disappear immediately after writing q->status. */
		smp_wmb();
		q->status = q->pid;
	}
	if (did_something)
		preempt_enable();	/* pairs with wake_up_sem_queue_prepare() */
}
706
/* Remove a queue entry from its pending list; multi-sop entries also
 * drop the array-wide complex operation count.
 */
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}
713
714
715
716
717
718
719
720
721
722
723
/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * was completed.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait for zero operations are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decremented to value - thus they won't proceed either.
	 */
	return 0;
}
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the global queue.
 * @pt: list head for the tasks that must be woken up.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
				struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sem_base[semnum].pending_const;

	/* open-coded walk: the entry may be unlinked while we iterate */
	walk = pending_list->next;
	while (walk != pending_list) {
		int error;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		error = perform_atomic_semop(sma, q->sops, q->nsops,
						 q->undo, q->pid);

		if (error <= 0) {
			/* operation completed, remove from queue & wakeup */
			unlink_queue(sma, q);

			wake_up_sem_queue_prepare(pt, q, error);
			if (error == 0)
				semop_completed = 1;
		}
	}
	return semop_completed;
}
797
798
799
800
801
802
803
804
805
806
807
808
809
/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed, may be NULL
 * @nsops: number of operations
 * @pt: list head of the tasks that must be woken up.
 *
 * Checks all required queue for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
					int nsops, struct list_head *pt)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sem_base[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, pt);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sem_base[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, pt);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, pt);

	return semop_completed;
}
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the global queue.
 * @pt: list head for the tasks that must be woken up.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function internally checks if const operations can now succeed.
 *
 * The function return 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sem_base[semnum].pending_alter;

again:
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, restart;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the  per semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(pt, q, error);
		if (restart)
			goto again;
	}
	return semop_completed;
}
919
920
921
922
923
924
925
926
927
/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line trashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
	if (sops == NULL) {
		sma->sem_base[0].sem_otime = get_seconds();
	} else {
		sma->sem_base[sops[0].sem_num].sem_otime =
							get_seconds();
	}
}
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed, may be NULL
 * @nsops: number of operations
 * @otime: force setting otime
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() does the required calls to update_queue and
 * do_smart_wakeup_zero, based on the actual changes that were performed on
 * the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_sem_queue_do(@pt).
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			int otime, struct list_head *pt)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it */
		otime |= update_queue(sma, -1, pt);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, pt);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decrease.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops won't be able to run: If the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							sops[i].sem_num, pt);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}
991
992
993
994
995
996
997
998
999
1000
/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	/* simple (single-sop) decrements queued on this semaphore */
	list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
		struct sembuf * sops = q->sops;
		BUG_ON(sops->sem_num != semnum);
		if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
			semncnt++;
	}

	/* complex operations on the global queue that decrement semnum */
	list_for_each_entry(q, &sma->pending_alter, list) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}
1026
1027static int count_semzcnt (struct sem_array * sma, ushort semnum)
1028{
1029 int semzcnt;
1030 struct sem_queue * q;
1031
1032 semzcnt = 0;
1033 list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
1034 struct sembuf * sops = q->sops;
1035 BUG_ON(sops->sem_num != semnum);
1036 if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
1037 semzcnt++;
1038 }
1039
1040 list_for_each_entry(q, &sma->pending_const, list) {
1041 struct sembuf * sops = q->sops;
1042 int nsops = q->nsops;
1043 int i;
1044 for (i = 0; i < nsops; i++)
1045 if (sops[i].sem_num == semnum
1046 && (sops[i].sem_op == 0)
1047 && !(sops[i].sem_flg & IPC_NOWAIT))
1048 semzcnt++;
1049 }
1050 return semzcnt;
1051}
1052
1053
1054
1055
1056
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	struct list_head tasks;
	int i;

	/* Free the existing undo structures for this semaphore set.  */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	INIT_LIST_HEAD(&tasks);
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_sem_queue_do(&tasks);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(sma, sem_rcu_free);
}
1108
/* Copy a semid64_ds out to user space, converting to the legacy
 * semid_ds layout if the caller used the old (IPC_OLD) ABI.
 * Returns the number of bytes that could not be copied (0 on success)
 * or -EINVAL for an unknown version.
 */
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		/* zero padding so no kernel stack data leaks to userspace */
		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
1132
1133static time_t get_semotime(struct sem_array *sma)
1134{
1135 int i;
1136 time_t res;
1137
1138 res = sma->sem_base[0].sem_otime;
1139 for (i = 1; i < sma->sem_nsems; i++) {
1140 time_t to = sma->sem_base[i].sem_otime;
1141
1142 if (to > res)
1143 res = to;
1144 }
1145 return res;
1146}
1147
/* semctl() commands that do not need the array lock:
 * IPC_INFO/SEM_INFO (namespace limits) and IPC_STAT/SEM_STAT
 * (per-array status, read under RCU only).
 */
static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, void __user *p)
{
	int err;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rwsem);
		if (cmd == SEM_INFO) {
			/* SEM_INFO reports actual usage... */
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			/* ...IPC_INFO reports the compile-time constants */
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rwsem);
		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		/* return the highest index, as documented for semctl(2) */
		return (max_id < 0) ? 0: max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id = 0;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == SEM_STAT) {
			/* SEM_STAT takes an index, returns the ipc id */
			sma = sem_obtain_object(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
			id = sma->sem_perm.id;
		} else {
			/* IPC_STAT takes an id, returns 0 */
			sma = sem_obtain_object_check(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = get_semotime(sma);
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		rcu_read_unlock();
		if (copy_semid_to_user(p, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	rcu_read_unlock();
	return err;
}
1236
1237static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1238 unsigned long arg)
1239{
1240 struct sem_undo *un;
1241 struct sem_array *sma;
1242 struct sem* curr;
1243 int err;
1244 struct list_head tasks;
1245 int val;
1246#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1247
1248 val = arg >> 32;
1249#else
1250
1251 val = arg;
1252#endif
1253
1254 if (val > SEMVMX || val < 0)
1255 return -ERANGE;
1256
1257 INIT_LIST_HEAD(&tasks);
1258
1259 rcu_read_lock();
1260 sma = sem_obtain_object_check(ns, semid);
1261 if (IS_ERR(sma)) {
1262 rcu_read_unlock();
1263 return PTR_ERR(sma);
1264 }
1265
1266 if (semnum < 0 || semnum >= sma->sem_nsems) {
1267 rcu_read_unlock();
1268 return -EINVAL;
1269 }
1270
1271
1272 if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1273 rcu_read_unlock();
1274 return -EACCES;
1275 }
1276
1277 err = security_sem_semctl(sma, SETVAL);
1278 if (err) {
1279 rcu_read_unlock();
1280 return -EACCES;
1281 }
1282
1283 sem_lock(sma, NULL, -1);
1284
1285 curr = &sma->sem_base[semnum];
1286
1287 ipc_assert_locked_object(&sma->sem_perm);
1288 list_for_each_entry(un, &sma->list_id, list_id)
1289 un->semadj[semnum] = 0;
1290
1291 curr->semval = val;
1292 curr->sempid = task_tgid_vnr(current);
1293 sma->sem_ctime = get_seconds();
1294
1295 do_smart_update(sma, NULL, 0, 0, &tasks);
1296 sem_unlock(sma, -1);
1297 rcu_read_unlock();
1298 wake_up_sem_queue_do(&tasks);
1299 return 0;
1300}
1301
1302static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1303 int cmd, void __user *p)
1304{
1305 struct sem_array *sma;
1306 struct sem* curr;
1307 int err, nsems;
1308 ushort fast_sem_io[SEMMSL_FAST];
1309 ushort* sem_io = fast_sem_io;
1310 struct list_head tasks;
1311
1312 INIT_LIST_HEAD(&tasks);
1313
1314 rcu_read_lock();
1315 sma = sem_obtain_object_check(ns, semid);
1316 if (IS_ERR(sma)) {
1317 rcu_read_unlock();
1318 return PTR_ERR(sma);
1319 }
1320
1321 nsems = sma->sem_nsems;
1322
1323 err = -EACCES;
1324 if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1325 goto out_rcu_wakeup;
1326
1327 err = security_sem_semctl(sma, cmd);
1328 if (err)
1329 goto out_rcu_wakeup;
1330
1331 err = -EACCES;
1332 switch (cmd) {
1333 case GETALL:
1334 {
1335 ushort __user *array = p;
1336 int i;
1337
1338 sem_lock(sma, NULL, -1);
1339 if(nsems > SEMMSL_FAST) {
1340 if (!ipc_rcu_getref(sma)) {
1341 sem_unlock(sma, -1);
1342 rcu_read_unlock();
1343 err = -EIDRM;
1344 goto out_free;
1345 }
1346 sem_unlock(sma, -1);
1347 rcu_read_unlock();
1348 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1349 if(sem_io == NULL) {
1350 ipc_rcu_putref(sma, ipc_rcu_free);
1351 return -ENOMEM;
1352 }
1353
1354 rcu_read_lock();
1355 sem_lock_and_putref(sma);
1356 if (sma->sem_perm.deleted) {
1357 sem_unlock(sma, -1);
1358 rcu_read_unlock();
1359 err = -EIDRM;
1360 goto out_free;
1361 }
1362 }
1363 for (i = 0; i < sma->sem_nsems; i++)
1364 sem_io[i] = sma->sem_base[i].semval;
1365 sem_unlock(sma, -1);
1366 rcu_read_unlock();
1367 err = 0;
1368 if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1369 err = -EFAULT;
1370 goto out_free;
1371 }
1372 case SETALL:
1373 {
1374 int i;
1375 struct sem_undo *un;
1376
1377 if (!ipc_rcu_getref(sma)) {
1378 rcu_read_unlock();
1379 return -EIDRM;
1380 }
1381 rcu_read_unlock();
1382
1383 if(nsems > SEMMSL_FAST) {
1384 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1385 if(sem_io == NULL) {
1386 ipc_rcu_putref(sma, ipc_rcu_free);
1387 return -ENOMEM;
1388 }
1389 }
1390
1391 if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
1392 ipc_rcu_putref(sma, ipc_rcu_free);
1393 err = -EFAULT;
1394 goto out_free;
1395 }
1396
1397 for (i = 0; i < nsems; i++) {
1398 if (sem_io[i] > SEMVMX) {
1399 ipc_rcu_putref(sma, ipc_rcu_free);
1400 err = -ERANGE;
1401 goto out_free;
1402 }
1403 }
1404 rcu_read_lock();
1405 sem_lock_and_putref(sma);
1406 if (sma->sem_perm.deleted) {
1407 sem_unlock(sma, -1);
1408 rcu_read_unlock();
1409 err = -EIDRM;
1410 goto out_free;
1411 }
1412
1413 for (i = 0; i < nsems; i++)
1414 sma->sem_base[i].semval = sem_io[i];
1415
1416 ipc_assert_locked_object(&sma->sem_perm);
1417 list_for_each_entry(un, &sma->list_id, list_id) {
1418 for (i = 0; i < nsems; i++)
1419 un->semadj[i] = 0;
1420 }
1421 sma->sem_ctime = get_seconds();
1422
1423 do_smart_update(sma, NULL, 0, 0, &tasks);
1424 err = 0;
1425 goto out_unlock;
1426 }
1427
1428 }
1429 err = -EINVAL;
1430 if (semnum < 0 || semnum >= nsems)
1431 goto out_rcu_wakeup;
1432
1433 sem_lock(sma, NULL, -1);
1434 curr = &sma->sem_base[semnum];
1435
1436 switch (cmd) {
1437 case GETVAL:
1438 err = curr->semval;
1439 goto out_unlock;
1440 case GETPID:
1441 err = curr->sempid;
1442 goto out_unlock;
1443 case GETNCNT:
1444 err = count_semncnt(sma,semnum);
1445 goto out_unlock;
1446 case GETZCNT:
1447 err = count_semzcnt(sma,semnum);
1448 goto out_unlock;
1449 }
1450
1451out_unlock:
1452 sem_unlock(sma, -1);
1453out_rcu_wakeup:
1454 rcu_read_unlock();
1455 wake_up_sem_queue_do(&tasks);
1456out_free:
1457 if(sem_io != fast_sem_io)
1458 ipc_free(sem_io, sizeof(ushort)*nsems);
1459 return err;
1460}
1461
/* Copy a semid64_ds in from user space, converting from the legacy
 * semid_ds layout if the caller used the old (IPC_OLD) ABI.
 * Only the fields IPC_SET consumes (uid, gid, mode) are converted.
 */
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
1487
1488
1489
1490
1491
1492
/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, void __user *p)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
	}

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	/* validates id, checks ownership/CAP_SYS_ADMIN, audits */
	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
				      &semid64.sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64.sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}
1548
/* semctl(2) entry point: dispatch to the handler that takes the
 * appropriate locks for the requested command.
 */
SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	int version;
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;

	if (semid < 0)
		return -EINVAL;

	/* strips the IPC_64 flag out of cmd */
	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		return semctl_nolock(ns, semid, cmd, version, p);
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		/* SETVAL passes the value inside arg, not via a pointer */
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_RMID:
	case IPC_SET:
		return semctl_down(ns, semid, cmd, version, p);
	default:
		return -EINVAL;
	}
}
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
/* If the task doesn't already have a undo_list, then allocate one
 * here.  We guarantee there is only one thread using this undo list,
 * and current is THE ONE.
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
1613
/* Scan the per-process undo list for the entry of semaphore set @semid.
 * Callers hold either ulp->lock or the RCU read lock.
 */
static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}
1624
/* Look up the undo structure for @semid and, if found, move it to the
 * front of the list (simple MRU caching: recently used undo structures
 * are found faster next time). Caller must hold ulp->lock.
 */
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
/**
 * find_alloc_undo - lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un!=NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	/* pin the array while we allocate without the RCU lock held */
	if (!ipc_rcu_getref(sma)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(sma, ipc_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo
	 * structure while we dropped the locks?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741static int get_queue_result(struct sem_queue *q)
1742{
1743 int error;
1744
1745 error = q->status;
1746 while (unlikely(error == IN_WAKEUP)) {
1747 cpu_relax();
1748 error = q->status;
1749 }
1750
1751 return error;
1752}
1753
/*
 * semtimedop(2): perform the array of semaphore operations in @tsops
 * atomically, optionally sleeping (up to @timeout) until they can all
 * succeed.  Returns 0 on success or a negative errno.
 */
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];	/* avoids kmalloc for small ops */
	struct sembuf* sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max, locknum;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;
	struct list_head tasks;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error=-EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	/*
	 * Scan the operations once: find the highest semaphore number
	 * touched (for the bounds check below), whether any op requests
	 * SEM_UNDO, and whether any op modifies the array (alter).
	 */
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

	INIT_LIST_HEAD(&tasks);

	if (undos) {
		/* On success, find_alloc_undo takes the rcu_read_lock */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_rcu_wakeup;

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_rcu_wakeup;

	/*
	 * semids are not unique: find_alloc_undo may have allocated an
	 * undo structure that was then invalidated by an IPC_RMID while
	 * we held only the rcu read lock, and a new array may have been
	 * created with the same id.  un->semid is set to -1 when the
	 * undo structure is invalidated, so check it under the sem lock
	 * and report the array as removed.
	 */
	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);
	if (un && un->semid == -1)
		goto out_unlock_free;

	error = perform_atomic_semop(sma, sops, nsops, un,
					task_tgid_vnr(current));
	if (error == 0) {
		/*
		 * The operations went through immediately.  If something
		 * was changed, check whether sleepers can now proceed;
		 * otherwise only sem_otime needs updating.
		 */
		if (alter)
			do_smart_update(sma, sops, nsops, 1, &tasks);
		else
			set_semotime(sma, sops);
	}
	if (error <= 0)
		goto out_unlock_free;

	/*
	 * error > 0: the operations would block.  Queue ourselves on the
	 * array and go to sleep.  queue lives on our stack; it is removed
	 * before we return (either by a waker or by unlink_queue below).
	 */
	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;

	if (nsops == 1) {
		/* single-sop fast path: queue on the per-semaphore lists */
		struct sem *curr;
		curr = &sma->sem_base[sops->sem_num];

		if (alter) {
			if (sma->complex_count) {
				/* a complex op is pending - keep global order */
				list_add_tail(&queue.list,
						&sma->pending_alter);
			} else {

				list_add_tail(&queue.list,
						&curr->pending_alter);
			}
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		/* complex op: fold per-semaphore queues into the global ones */
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	queue.status = -EINTR;	/* -EINTR means "not yet woken by a waker" */
	queue.sleeper = current;

sleep_again:
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma, locknum);
	rcu_read_unlock();

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = get_queue_result(&queue);

	if (error != -EINTR) {
		/*
		 * fast path: a waker already performed the operation and
		 * removed us from the queue - no locking needed.
		 *
		 * This smp_mb() pairs with the waker's updates: user space
		 * may assume semop() is a full memory barrier, and without
		 * it the cpu could speculatively read stale user-space data
		 * that was overwritten by the previous semaphore owner.
		 */
		smp_mb();

		goto out_free;
	}

	rcu_read_lock();
	sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);

	/*
	 * Re-read the status under the lock: a wakeup may have raced
	 * with the timeout/signal between get_queue_result() above and
	 * acquiring the lock.
	 */
	error = get_queue_result(&queue);

	/*
	 * Array removed?  If yes, leave without sem_unlock().
	 */
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we were woken up by another process:
	 * the waker already unlinked us, so leave without unlink_queue()
	 * but with sem_unlock().
	 */
	if (error != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * Still -EINTR: we woke because of a timeout or a signal.
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;

	/*
	 * If the wakeup wasn't a real interruption (no signal pending),
	 * go back to sleep for the remaining time.
	 */
	if (error == -EINTR && !signal_pending(current))
		goto sleep_again;

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma, locknum);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}
1981
/* semop(2) is simply semtimedop(2) without a timeout. */
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
1987
1988
1989
1990
1991
1992int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
1993{
1994 struct sem_undo_list *undo_list;
1995 int error;
1996
1997 if (clone_flags & CLONE_SYSVSEM) {
1998 error = get_undo_list(&undo_list);
1999 if (error)
2000 return error;
2001 atomic_inc(&undo_list->refcnt);
2002 tsk->sysvsem.undo_list = undo_list;
2003 } else
2004 tsk->sysvsem.undo_list = NULL;
2005
2006 return 0;
2007}
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
/*
 * exit_sem - apply this task's SEM_UNDO adjustments and free its undo
 * structures at process exit.
 *
 * The undo list is shared between CLONE_SYSVSEM tasks; only the last
 * task to drop its reference performs the adjustments.  Undo structures
 * are not freed when semaphore arrays are destroyed, so some entries on
 * the list may already be stale (semid == -1 or array removed).
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!atomic_dec_and_test(&ulp->refcnt))
		return;

	/* last reference: walk and destroy the list, one entry per pass */
	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		struct list_head tasks;
		int semid, i;

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
					struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc)
			semid = -1;	/* list empty - done */
		else
			semid = un->semid;

		if (semid == -1) {
			rcu_read_unlock();
			break;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
		/* array removed by IPC_RMID? skip this (freed) entry */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		/* re-lookup under the lock: the entry may have gone away */
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/*
			 * exit_sem raced with IPC_RMID+semget() that
			 * re-created the same semid.  Nothing to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from both linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform the adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem * semaphore = &sma->sem_base[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the semaphore value are
				 * not defined by SUS:
				 * - some unices ignore the undo entirely
				 *   (e.g. HP-UX 11i, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}

		/* the adjustments may allow queued sleepers to proceed */
		INIT_LIST_HEAD(&tasks);
		do_smart_update(sma, NULL, 0, 1, &tasks);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_sem_queue_do(&tasks);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}
2114
2115#ifdef CONFIG_PROC_FS
/* /proc/sysvipc/sem: print one line per semaphore array. */
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct sem_array *sma = it;
	time_t sem_otime;

	/*
	 * The proc interface isn't aware of sem_lock(): it calls
	 * ipc_lock_object() directly, taking only the global lock.
	 * To stay compatible with sem_lock(), we must therefore wait
	 * until all simple semop() calls have left their per-semaphore
	 * critical sections before reading the per-sem otime values.
	 */
	sem_wait_array(sma);

	sem_otime = get_semotime(sma);

	return seq_printf(s,
			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  from_kuid_munged(user_ns, sma->sem_perm.uid),
			  from_kgid_munged(user_ns, sma->sem_perm.gid),
			  from_kuid_munged(user_ns, sma->sem_perm.cuid),
			  from_kgid_munged(user_ns, sma->sem_perm.cgid),
			  sem_otime,
			  sma->sem_ctime);
}
2145#endif
2146