/*
 * Ptrace user space interface, s390 version.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>

#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

enum s390_regset {
	REGSET_GENERAL,
	REGSET_FP,
};

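/*
 * FixPerRegisters propagates the per_info debugging state kept in the
 * thread structure into the shadow PER (program event recording) control
 * register values and the PER bit of the user PSW. Single stepping is
 * implemented as a PER instruction-fetch event covering the whole
 * address space of the traced task.
 */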
static void
FixPerRegisters(struct task_struct *task)
{
	struct pt_regs *regs;
	per_struct *per_info;

	regs = task_pt_regs(task);
	per_info = (per_struct *) &task->thread.per_info;
	per_info->control_regs.bits.em_instruction_fetch =
		per_info->single_step | per_info->instruction_fetch;

	if (per_info->single_step) {
		per_info->control_regs.bits.starting_addr = 0;
#ifdef CONFIG_COMPAT
		if (test_thread_flag(TIF_31BIT))
			per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
		else
#endif
			per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
	} else {
		per_info->control_regs.bits.starting_addr =
			per_info->starting_addr;
		per_info->control_regs.bits.ending_addr =
			per_info->ending_addr;
	}
	/*
	 * If any of the control reg tracing bits are on
	 * we switch on per in the psw.
	 */
	if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
		regs->psw.mask |= PSW_MASK_PER;
	else
		regs->psw.mask &= ~PSW_MASK_PER;

	if (per_info->control_regs.bits.em_storage_alteration)
		per_info->control_regs.bits.storage_alt_space_ctl = 1;
	else
		per_info->control_regs.bits.storage_alt_space_ctl = 0;
}

void user_enable_single_step(struct task_struct *task)
{
	task->thread.per_info.single_step = 1;
	FixPerRegisters(task);
}

void user_disable_single_step(struct task_struct *task)
{
	task->thread.per_info.single_step = 0;
	FixPerRegisters(task);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void
ptrace_disable(struct task_struct *child)
{
	/* Make sure the single step bit is not set. */
	user_disable_single_step(child);
}

#ifndef CONFIG_64BIT
# define __ADDR_MASK 3
#else
# define __ADDR_MASK 7
#endif

/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations: the process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask)
			/* Remove per bit from user psw. */
			tmp &= ~PSW_MASK_PER;

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
#endif
		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390
		 */
		tmp = 0;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			tmp &= (unsigned long) FPC_VALID_MASK
				<< (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.per_info;
		tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);

	} else
		tmp = 0;

	return tmp;
}

static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &dummy->regs.acrs &&
	    addr < (addr_t) &dummy->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}

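/*
 * Illustrative sketch (userspace, not part of this file): a tracer reads
 * a single word from the USER area with PTRACE_PEEKUSR, passing the byte
 * offset within struct user as the address argument, e.g.
 *
 *	errno = 0;
 *	long gpr2 = ptrace(PTRACE_PEEKUSR, pid,
 *			   (void *) offsetof(struct user, regs.gprs[2]), 0L);
 *	if (gpr2 == -1 && errno)
 *		perror("PTRACE_PEEKUSR");
 *
 * The offset must be word aligned (see the mask check above); the only
 * exception are the access registers, which gdb addresses with 4 byte
 * alignment even on 64 bit.
 */
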
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and on the floating point
 * control register need to get checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask &&
#ifdef CONFIG_COMPAT
		    data != PSW_MASK_MERGE(psw_user32_bits, data) &&
#endif
		    data != PSW_MASK_MERGE(psw_user_bits, data))
			/* Invalid psw mask. */
			return -EINVAL;
#ifndef CONFIG_64BIT
		if (addr == (addr_t) &dummy->regs.psw.addr)
			/* I'd like to reject addresses without the
			   high order bit but older gdb's rely on it */
			data |= PSW_ADDR_AMODE;
#endif
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
#endif
		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390
		 */
		return 0;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
		    (data & ~((unsigned long) FPC_VALID_MASK
			      << (BITS_PER_LONG - 32))) != 0)
			return -EINVAL;
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.per_info;
		*(addr_t *)((addr_t) &child->thread.per_info + offset) = data;

	}

	FixPerRegisters(child);
	return 0;
}

static int
poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &dummy->regs.acrs &&
	    addr < (addr_t) &dummy->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* Remove high order bit from address (only for 31 bit). */
		addr &= PSW_ADDR_INSN;
		/* read word at location addr. */
		return generic_ptrace_peekdata(child, addr, data);

	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		/* Remove high order bit from address (only for 31 bit). */
		addr &= PSW_ADDR_INSN;
		/* write the word at location addr. */
		return generic_ptrace_pokedata(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area. */
		return poke_user(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	}
	return ptrace_request(child, request, addr, data);
}

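/*
 * Illustrative sketch (userspace, not part of this file): the s390
 * specific PTRACE_PEEKUSR_AREA/PTRACE_POKEUSR_AREA requests transfer a
 * whole block of the USER area in one call. The tracer passes a
 * ptrace_area descriptor as the address argument, e.g.
 *
 *	unsigned long gprs[NUM_GPRS];
 *	ptrace_area pa = {
 *		.len          = sizeof(gprs),
 *		.kernel_addr  = offsetof(struct user, regs.gprs),
 *		.process_addr = (unsigned long) gprs,
 *	};
 *	if (ptrace(PTRACE_PEEKUSR_AREA, pid, &pa, NULL) == -1)
 *		perror("PTRACE_PEEKUSR_AREA");
 *
 * The loop above copies the area one word at a time through
 * peek_user()/poke_user(), so the per word alignment and range checks
 * still apply.
 */
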
#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 byte instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The PEEKUSR and POKEUSR requests have to map the 31 bit layout
 * of struct user32 onto the 64 bit pt_regs, access and floating
 * point registers kept for the traced task; this is what the
 * __peek_user_compat and __poke_user_compat helpers below do.
 */

/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct user32 *dummy32 = NULL;
	per_struct32 *dummy_per32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
			tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) task_pt_regs(child)->psw.addr |
				PSW32_ADDR_AMODE31;
		} else {
			/* gpr 0-15 */
			tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
					 addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390
		 */
		tmp = 0;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.per_info;
		/* This is magic. See per_struct and per_struct32. */
		if ((offset >= (addr_t) &dummy_per32->control_regs &&
		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
		    (offset >= (addr_t) &dummy_per32->starting_addr &&
		     offset <= (addr_t) &dummy_per32->ending_addr) ||
		    offset == (addr_t) &dummy_per32->lowcore.words.address)
			offset = offset*2 + 4;
		else
			offset = offset*2;
		tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);

	} else
		tmp = 0;

	return tmp;
}

static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	if (!test_thread_flag(TIF_31BIT) ||
	    (addr & 3) || addr > sizeof(struct user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}

/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct user32 *dummy32 = NULL;
	per_struct32 *dummy_per32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Build a 64 bit psw mask from 31 bit mask. */
			if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
				/* Invalid psw mask. */
				return -EINVAL;
			task_pt_regs(child)->psw.mask =
				PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			task_pt_regs(child)->psw.addr =
				(__u64) tmp & PSW32_ADDR_INSN;
		} else {
			/* gpr 0-15 */
			*(__u32*)((addr_t) &task_pt_regs(child)->psw
				  + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390
		 */
		return 0;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
		    (tmp & ~FPC_VALID_MASK) != 0)
			/* Invalid floating point control. */
			return -EINVAL;
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.per_info;
		/*
		 * This is magic. See per_struct and per_struct32.
		 * By incident the offsets in per_struct are exactly
		 * twice the offsets in per_struct32 for all fields.
		 * The 8 byte fields need special handling though,
		 * because the second half (bytes 4-7) is needed and
		 * not the first half.
		 */
		if ((offset >= (addr_t) &dummy_per32->control_regs &&
		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
		    (offset >= (addr_t) &dummy_per32->starting_addr &&
		     offset <= (addr_t) &dummy_per32->ending_addr) ||
		    offset == (addr_t) &dummy_per32->lowcore.words.address)
			offset = offset*2 + 4;
		else
			offset = offset*2;
		*(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;

	}

	FixPerRegisters(child);
	return 0;
}

static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!test_thread_flag(TIF_31BIT) ||
	    (addr & 3) || addr > sizeof(struct user32) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	ptrace_area_emu31 parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area. */
		return poke_user_compat(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	}
	return compat_ptrace_request(child, request, addr, data);
}
#endif

asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret;

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	ret = regs->gprs[2];
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		regs->svcnr = 0;
		ret = -1;
	}

	if (unlikely(current->audit_context))
		audit_syscall_entry(test_thread_flag(TIF_31BIT) ?
					AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
				    regs->gprs[2], regs->orig_gpr2,
				    regs->gprs[3], regs->gprs[4],
				    regs->gprs[5]);
	return ret;
}

asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
				   regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}

/*
 * user_regset definitions.
 */

static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_regs, 0, -1);
}

static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
			return -EINVAL;
		target->thread.fp_regs.fpc = fpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					target->thread.fp_regs.fprs,
					offsetof(s390_fp_regs, fprs), -1);

	if (rc == 0 && target == current)
		restore_fp_regs(&target->thread.fp_regs);

	return rc;
}

static const struct user_regset s390_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
};

static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};

#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user_compat(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user_compat(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static const struct user_regset s390_compat_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
};

static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}