linux/kernel/compat.c
/*
 *  linux/kernel/compat.c
 *
 *  Kernel compatibility routines for e.g. 32 bit syscall support
 *  on 64 bit kernels.
 *
 *  Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/signal.h>
#include <linux/sched.h>        /* for MAX_SCHEDULE_TIMEOUT */
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/security.h>
#include <linux/timex.h>
#include <linux/migrate.h>
#include <linux/posix-timers.h>
#include <linux/module.h>

#include <asm/uaccess.h>

int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
{
        return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
                        __get_user(ts->tv_sec, &cts->tv_sec) ||
                        __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}

int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
{
        return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
                        __put_user(ts->tv_sec, &cts->tv_sec) ||
                        __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}

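/*
 * Note: many of the wrappers below call the native syscall with pointers
 * to kernel stack buffers.  The get_fs()/set_fs(KERNEL_DS) pairs widen the
 * address limit so that the native code's copy_to_user()/copy_from_user()
 * accepts those kernel addresses; the results are then converted to the
 * 32-bit compat layout by hand before being copied out to user space.
 */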
static long compat_nanosleep_restart(struct restart_block *restart)
{
        struct compat_timespec __user *rmtp;
        struct timespec rmt;
        mm_segment_t oldfs;
        long ret;

        restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        ret = hrtimer_nanosleep_restart(restart);
        set_fs(oldfs);

        if (ret) {
                rmtp = restart->nanosleep.compat_rmtp;

                if (rmtp && put_compat_timespec(&rmt, rmtp))
                        return -EFAULT;
        }

        return ret;
}

asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
                                     struct compat_timespec __user *rmtp)
{
        struct timespec tu, rmt;
        mm_segment_t oldfs;
        long ret;

        if (get_compat_timespec(&tu, rqtp))
                return -EFAULT;

        if (!timespec_valid(&tu))
                return -EINVAL;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        ret = hrtimer_nanosleep(&tu,
                                rmtp ? (struct timespec __user *)&rmt : NULL,
                                HRTIMER_MODE_REL, CLOCK_MONOTONIC);
        set_fs(oldfs);

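        /*
         * The sleep was interrupted: report the time remaining in the
         * compat layout and make sure a syscall restart is routed through
         * compat_nanosleep_restart() rather than the native restart path.
         */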
        if (ret) {
                struct restart_block *restart
                        = &current_thread_info()->restart_block;

                restart->fn = compat_nanosleep_restart;
                restart->nanosleep.compat_rmtp = rmtp;

                if (rmtp && put_compat_timespec(&rmt, rmtp))
                        return -EFAULT;
        }

        return ret;
}

static inline long get_compat_itimerval(struct itimerval *o,
                struct compat_itimerval __user *i)
{
        return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
                (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
                 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
                 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
                 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}

static inline long put_compat_itimerval(struct compat_itimerval __user *o,
                struct itimerval *i)
{
        return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
                (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
                 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
                 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
                 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
}

asmlinkage long compat_sys_getitimer(int which,
                struct compat_itimerval __user *it)
{
        struct itimerval kit;
        int error;

        error = do_getitimer(which, &kit);
        if (!error && put_compat_itimerval(it, &kit))
                error = -EFAULT;
        return error;
}

asmlinkage long compat_sys_setitimer(int which,
                struct compat_itimerval __user *in,
                struct compat_itimerval __user *out)
{
        struct itimerval kin, kout;
        int error;

        if (in) {
                if (get_compat_itimerval(&kin, in))
                        return -EFAULT;
        } else
                memset(&kin, 0, sizeof(kin));

        error = do_setitimer(which, &kin, out ? &kout : NULL);
        if (error || !out)
                return error;
        if (put_compat_itimerval(out, &kout))
                return -EFAULT;
        return 0;
}

asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
{
        /*
         *      In the SMP world we might just be unlucky and have one of
         *      the times increment as we use it. Since the value is an
         *      atomically safe type this is just fine. Conceptually it's
         *      as if the syscall took an instant longer to occur.
         */
        if (tbuf) {
                struct compat_tms tmp;
                struct task_struct *tsk = current;
                struct task_struct *t;
                cputime_t utime, stime, cutime, cstime;

                read_lock(&tasklist_lock);
                utime = tsk->signal->utime;
                stime = tsk->signal->stime;
                t = tsk;
                do {
                        utime = cputime_add(utime, t->utime);
                        stime = cputime_add(stime, t->stime);
                        t = next_thread(t);
                } while (t != tsk);

                /*
                 * While we have tasklist_lock read-locked, no dying thread
                 * can be updating current->signal->[us]time.  Instead,
                 * we got their counts included in the live thread loop.
                 * However, another thread can come in right now and
                 * do a wait call that updates current->signal->c[us]time.
                 * To make sure we always see that pair updated atomically,
                 * we take the siglock around fetching them.
                 */
                spin_lock_irq(&tsk->sighand->siglock);
                cutime = tsk->signal->cutime;
                cstime = tsk->signal->cstime;
                spin_unlock_irq(&tsk->sighand->siglock);
                read_unlock(&tasklist_lock);

                tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime));
                tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime));
                tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime));
                tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime));
                if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
                        return -EFAULT;
        }
        return compat_jiffies_to_clock_t(jiffies);
}

/*
 * Assumption: old_sigset_t and compat_old_sigset_t are both
 * types that can be passed to put_user()/get_user().
 */

asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
{
        old_sigset_t s;
        long ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_sigpending((old_sigset_t __user *) &s);
        set_fs(old_fs);
        if (ret == 0)
                ret = put_user(s, set);
        return ret;
}

asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
                compat_old_sigset_t __user *oset)
{
        old_sigset_t s;
        long ret;
        mm_segment_t old_fs;

        if (set && get_user(s, set))
                return -EFAULT;
        old_fs = get_fs();
        set_fs(KERNEL_DS);
        ret = sys_sigprocmask(how,
                              set ? (old_sigset_t __user *) &s : NULL,
                              oset ? (old_sigset_t __user *) &s : NULL);
        set_fs(old_fs);
        if (ret == 0)
                if (oset)
                        ret = put_user(s, oset);
        return ret;
}

asmlinkage long compat_sys_setrlimit(unsigned int resource,
                struct compat_rlimit __user *rlim)
{
        struct rlimit r;
        int ret;
        mm_segment_t old_fs = get_fs ();

        if (resource >= RLIM_NLIMITS)
                return -EINVAL;

        if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
            __get_user(r.rlim_cur, &rlim->rlim_cur) ||
            __get_user(r.rlim_max, &rlim->rlim_max))
                return -EFAULT;

        if (r.rlim_cur == COMPAT_RLIM_INFINITY)
                r.rlim_cur = RLIM_INFINITY;
        if (r.rlim_max == COMPAT_RLIM_INFINITY)
                r.rlim_max = RLIM_INFINITY;
        set_fs(KERNEL_DS);
        ret = sys_setrlimit(resource, (struct rlimit __user *) &r);
        set_fs(old_fs);
        return ret;
}

#ifdef COMPAT_RLIM_OLD_INFINITY

asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
                struct compat_rlimit __user *rlim)
{
        struct rlimit r;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_old_getrlimit(resource, &r);
        set_fs(old_fs);

        if (!ret) {
                if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY)
                        r.rlim_cur = COMPAT_RLIM_INFINITY;
                if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY)
                        r.rlim_max = COMPAT_RLIM_INFINITY;

                if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
                    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
                    __put_user(r.rlim_max, &rlim->rlim_max))
                        return -EFAULT;
        }
        return ret;
}

#endif

asmlinkage long compat_sys_getrlimit (unsigned int resource,
                struct compat_rlimit __user *rlim)
{
        struct rlimit r;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_getrlimit(resource, (struct rlimit __user *) &r);
        set_fs(old_fs);
        if (!ret) {
                if (r.rlim_cur > COMPAT_RLIM_INFINITY)
                        r.rlim_cur = COMPAT_RLIM_INFINITY;
                if (r.rlim_max > COMPAT_RLIM_INFINITY)
                        r.rlim_max = COMPAT_RLIM_INFINITY;

                if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
                    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
                    __put_user(r.rlim_max, &rlim->rlim_max))
                        return -EFAULT;
        }
        return ret;
}

int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
{
        if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) ||
            __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) ||
            __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) ||
            __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) ||
            __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) ||
            __put_user(r->ru_maxrss, &ru->ru_maxrss) ||
            __put_user(r->ru_ixrss, &ru->ru_ixrss) ||
            __put_user(r->ru_idrss, &ru->ru_idrss) ||
            __put_user(r->ru_isrss, &ru->ru_isrss) ||
            __put_user(r->ru_minflt, &ru->ru_minflt) ||
            __put_user(r->ru_majflt, &ru->ru_majflt) ||
            __put_user(r->ru_nswap, &ru->ru_nswap) ||
            __put_user(r->ru_inblock, &ru->ru_inblock) ||
            __put_user(r->ru_oublock, &ru->ru_oublock) ||
            __put_user(r->ru_msgsnd, &ru->ru_msgsnd) ||
            __put_user(r->ru_msgrcv, &ru->ru_msgrcv) ||
            __put_user(r->ru_nsignals, &ru->ru_nsignals) ||
            __put_user(r->ru_nvcsw, &ru->ru_nvcsw) ||
            __put_user(r->ru_nivcsw, &ru->ru_nivcsw))
                return -EFAULT;
        return 0;
}

asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
{
        struct rusage r;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_getrusage(who, (struct rusage __user *) &r);
        set_fs(old_fs);

        if (ret)
                return ret;

        if (put_compat_rusage(&r, ru))
                return -EFAULT;

        return 0;
}

asmlinkage long
compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
        struct compat_rusage __user *ru)
{
        if (!ru) {
                return sys_wait4(pid, stat_addr, options, NULL);
        } else {
                struct rusage r;
                int ret;
                unsigned int status;
                mm_segment_t old_fs = get_fs();

                set_fs (KERNEL_DS);
                ret = sys_wait4(pid,
                                (stat_addr ?
                                 (unsigned int __user *) &status : NULL),
                                options, (struct rusage __user *) &r);
                set_fs (old_fs);

                if (ret > 0) {
                        if (put_compat_rusage(&r, ru))
                                return -EFAULT;
                        if (stat_addr && put_user(status, stat_addr))
                                return -EFAULT;
                }
                return ret;
        }
}

asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
                struct compat_siginfo __user *uinfo, int options,
                struct compat_rusage __user *uru)
{
        siginfo_t info;
        struct rusage ru;
        long ret;
        mm_segment_t old_fs = get_fs();

        memset(&info, 0, sizeof(info));

        set_fs(KERNEL_DS);
        ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
                         uru ? (struct rusage __user *)&ru : NULL);
        set_fs(old_fs);

        if ((ret < 0) || (info.si_signo == 0))
                return ret;

        if (uru) {
                ret = put_compat_rusage(&ru, uru);
                if (ret)
                        return ret;
        }

        BUG_ON(info.si_code & __SI_MASK);
        info.si_code |= __SI_CHLD;
        return copy_siginfo_to_user32(uinfo, &info);
}

static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
                                    unsigned len, cpumask_t *new_mask)
{
        unsigned long *k;

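        /*
         * The mask handed in by user space may be shorter or longer than
         * the kernel's cpumask_t: zero-fill whatever the user did not
         * supply and clamp oversized buffers, then read len * 8 bits.
         */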
        if (len < sizeof(cpumask_t))
                memset(new_mask, 0, sizeof(cpumask_t));
        else if (len > sizeof(cpumask_t))
                len = sizeof(cpumask_t);

        k = cpus_addr(*new_mask);
        return compat_get_bitmap(k, user_mask_ptr, len * 8);
}

asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
                                             unsigned int len,
                                             compat_ulong_t __user *user_mask_ptr)
{
        cpumask_t new_mask;
        int retval;

        retval = compat_get_user_cpu_mask(user_mask_ptr, len, &new_mask);
        if (retval)
                return retval;

        return sched_setaffinity(pid, &new_mask);
}

asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
                                             compat_ulong_t __user *user_mask_ptr)
{
        int ret;
        cpumask_t mask;
        unsigned long *k;
        unsigned int min_length = sizeof(cpumask_t);

        if (NR_CPUS <= BITS_PER_COMPAT_LONG)
                min_length = sizeof(compat_ulong_t);

        if (len < min_length)
                return -EINVAL;

        ret = sched_getaffinity(pid, &mask);
        if (ret < 0)
                return ret;

        k = cpus_addr(mask);
        ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
        if (ret)
                return ret;

        return min_length;
}

int get_compat_itimerspec(struct itimerspec *dst,
                          const struct compat_itimerspec __user *src)
{
        if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
            get_compat_timespec(&dst->it_value, &src->it_value))
                return -EFAULT;
        return 0;
}

int put_compat_itimerspec(struct compat_itimerspec __user *dst,
                          const struct itimerspec *src)
{
        if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
            put_compat_timespec(&src->it_value, &dst->it_value))
                return -EFAULT;
        return 0;
}

long compat_sys_timer_create(clockid_t which_clock,
                        struct compat_sigevent __user *timer_event_spec,
                        timer_t __user *created_timer_id)
{
        struct sigevent __user *event = NULL;

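        /*
         * sys_timer_create() wants a native struct sigevent in user memory,
         * so decode the compat version into a kernel copy and write it back
         * out to scratch space obtained from compat_alloc_user_space().
         */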
        if (timer_event_spec) {
                struct sigevent kevent;

                event = compat_alloc_user_space(sizeof(*event));
                if (get_compat_sigevent(&kevent, timer_event_spec) ||
                    copy_to_user(event, &kevent, sizeof(*event)))
                        return -EFAULT;
        }

        return sys_timer_create(which_clock, event, created_timer_id);
}

long compat_sys_timer_settime(timer_t timer_id, int flags,
                          struct compat_itimerspec __user *new,
                          struct compat_itimerspec __user *old)
{
        long err;
        mm_segment_t oldfs;
        struct itimerspec newts, oldts;

        if (!new)
                return -EINVAL;
        if (get_compat_itimerspec(&newts, new))
                return -EFAULT;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_timer_settime(timer_id, flags,
                                (struct itimerspec __user *) &newts,
                                (struct itimerspec __user *) &oldts);
        set_fs(oldfs);
        if (!err && old && put_compat_itimerspec(old, &oldts))
                return -EFAULT;
        return err;
}

long compat_sys_timer_gettime(timer_t timer_id,
                struct compat_itimerspec __user *setting)
{
        long err;
        mm_segment_t oldfs;
        struct itimerspec ts;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_timer_gettime(timer_id,
                                (struct itimerspec __user *) &ts);
        set_fs(oldfs);
        if (!err && put_compat_itimerspec(setting, &ts))
                return -EFAULT;
        return err;
}

long compat_sys_clock_settime(clockid_t which_clock,
                struct compat_timespec __user *tp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec ts;

        if (get_compat_timespec(&ts, tp))
                return -EFAULT;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_settime(which_clock,
                                (struct timespec __user *) &ts);
        set_fs(oldfs);
        return err;
}

long compat_sys_clock_gettime(clockid_t which_clock,
                struct compat_timespec __user *tp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec ts;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_gettime(which_clock,
                                (struct timespec __user *) &ts);
        set_fs(oldfs);
        if (!err && put_compat_timespec(&ts, tp))
                return -EFAULT;
        return err;
}

long compat_sys_clock_getres(clockid_t which_clock,
                struct compat_timespec __user *tp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec ts;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_getres(which_clock,
                               (struct timespec __user *) &ts);
        set_fs(oldfs);
        if (!err && tp && put_compat_timespec(&ts, tp))
                return -EFAULT;
        return err;
}

static long compat_clock_nanosleep_restart(struct restart_block *restart)
{
        long err;
        mm_segment_t oldfs;
        struct timespec tu;
        struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;

        restart->nanosleep.rmtp = (struct timespec __user *) &tu;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = clock_nanosleep_restart(restart);
        set_fs(oldfs);

        if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
            put_compat_timespec(&tu, rmtp))
                return -EFAULT;

        if (err == -ERESTART_RESTARTBLOCK) {
                restart->fn = compat_clock_nanosleep_restart;
                restart->nanosleep.compat_rmtp = rmtp;
        }
        return err;
}

long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
                            struct compat_timespec __user *rqtp,
                            struct compat_timespec __user *rmtp)
{
        long err;
        mm_segment_t oldfs;
        struct timespec in, out;
        struct restart_block *restart;

        if (get_compat_timespec(&in, rqtp))
                return -EFAULT;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_nanosleep(which_clock, flags,
                                  (struct timespec __user *) &in,
                                  (struct timespec __user *) &out);
        set_fs(oldfs);

        if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
            put_compat_timespec(&out, rmtp))
                return -EFAULT;

        if (err == -ERESTART_RESTARTBLOCK) {
                restart = &current_thread_info()->restart_block;
                restart->fn = compat_clock_nanosleep_restart;
                restart->nanosleep.compat_rmtp = rmtp;
        }
        return err;
}

/*
 * We currently only need the following fields from the sigevent
 * structure: sigev_value, sigev_signo, sigev_notify and (sometimes
 * sigev_notify_thread_id).  The others are handled in user mode.
 * We also assume that copying sigev_value.sival_int is sufficient
 * to keep all the bits of sigev_value.sival_ptr intact.
 */
int get_compat_sigevent(struct sigevent *event,
                const struct compat_sigevent __user *u_event)
{
        memset(event, 0, sizeof(*event));
        return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) ||
                __get_user(event->sigev_value.sival_int,
                        &u_event->sigev_value.sival_int) ||
                __get_user(event->sigev_signo, &u_event->sigev_signo) ||
                __get_user(event->sigev_notify, &u_event->sigev_notify) ||
                __get_user(event->sigev_notify_thread_id,
                        &u_event->sigev_notify_thread_id))
                ? -EFAULT : 0;
}

long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
                       unsigned long bitmap_size)
{
        int i, j;
        unsigned long m;
        compat_ulong_t um;
        unsigned long nr_compat_longs;

        /* align bitmap up to nearest compat_long_t boundary */
        bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

        if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
                return -EFAULT;

        nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

        for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
                m = 0;

                for (j = 0; j < sizeof(m)/sizeof(um); j++) {
                        /*
                         * We don't want to read past the end of the userspace
                         * bitmap. We must however ensure the end of the
                         * kernel bitmap is zeroed.
                         */
                        if (nr_compat_longs-- > 0) {
                                if (__get_user(um, umask))
                                        return -EFAULT;
                        } else {
                                um = 0;
                        }

                        umask++;
                        m |= (long)um << (j * BITS_PER_COMPAT_LONG);
                }
                *mask++ = m;
        }

        return 0;
}

long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
                       unsigned long bitmap_size)
{
        int i, j;
        unsigned long m;
        compat_ulong_t um;
        unsigned long nr_compat_longs;

        /* align bitmap up to nearest compat_long_t boundary */
        bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

        if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
                return -EFAULT;

        nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

        for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
                m = *mask++;

                for (j = 0; j < sizeof(m)/sizeof(um); j++) {
                        um = m;

                        /*
                         * We don't want to write past the end of the userspace
                         * bitmap.
                         */
                        if (nr_compat_longs-- > 0) {
                                if (__put_user(um, umask))
                                        return -EFAULT;
                        }

                        umask++;
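                        /*
                         * Advance m by BITS_PER_COMPAT_LONG in two half-sized
                         * shifts so that no single shift count can equal the
                         * width of m (which would be undefined behaviour when
                         * unsigned long and compat_ulong_t are the same size).
                         */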
                        m >>= 4*sizeof(um);
                        m >>= 4*sizeof(um);
                }
        }

        return 0;
}

void
sigset_from_compat (sigset_t *set, compat_sigset_t *compat)
{
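        /*
         * Each native sigset word is built from two 32-bit compat words;
         * the cases deliberately fall through so every word up to
         * _NSIG_WORDS gets filled in.
         */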
        switch (_NSIG_WORDS) {
        case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32 );
        case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32 );
        case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32 );
        case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32 );
        }
}

asmlinkage long
compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,
                struct compat_siginfo __user *uinfo,
                struct compat_timespec __user *uts, compat_size_t sigsetsize)
{
        compat_sigset_t s32;
        sigset_t s;
        int sig;
        struct timespec t;
        siginfo_t info;
        long ret, timeout = 0;

        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
                return -EFAULT;
        sigset_from_compat(&s, &s32);
        sigdelsetmask(&s,sigmask(SIGKILL)|sigmask(SIGSTOP));
        signotset(&s);

        if (uts) {
                if (get_compat_timespec (&t, uts))
                        return -EFAULT;
                if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0
                                || t.tv_sec < 0)
                        return -EINVAL;
        }

        spin_lock_irq(&current->sighand->siglock);
        sig = dequeue_signal(current, &s, &info);
        if (!sig) {
                timeout = MAX_SCHEDULE_TIMEOUT;
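                /*
                 * When a timeout is supplied, a zero timespec turns this into
                 * a pure poll; otherwise the (tv_sec || tv_nsec) term rounds
                 * the wait up by one jiffy so it is never shorter than
                 * requested.
                 */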
                if (uts)
                        timeout = timespec_to_jiffies(&t)
                                +(t.tv_sec || t.tv_nsec);
                if (timeout) {
                        current->real_blocked = current->blocked;
                        sigandsets(&current->blocked, &current->blocked, &s);

                        recalc_sigpending();
                        spin_unlock_irq(&current->sighand->siglock);

                        timeout = schedule_timeout_interruptible(timeout);

                        spin_lock_irq(&current->sighand->siglock);
                        sig = dequeue_signal(current, &s, &info);
                        current->blocked = current->real_blocked;
                        siginitset(&current->real_blocked, 0);
                        recalc_sigpending();
                }
        }
        spin_unlock_irq(&current->sighand->siglock);

        if (sig) {
                ret = sig;
                if (uinfo) {
                        if (copy_siginfo_to_user32(uinfo, &info))
                                ret = -EFAULT;
                }
        } else {
                ret = timeout ? -EINTR : -EAGAIN;
        }
        return ret;
}

#ifdef __ARCH_WANT_COMPAT_SYS_TIME

/* compat_time_t is a 32 bit "long" and needs to get converted. */

asmlinkage long compat_sys_time(compat_time_t __user * tloc)
{
        compat_time_t i;
        struct timeval tv;

        do_gettimeofday(&tv);
        i = tv.tv_sec;

        if (tloc) {
                if (put_user(i,tloc))
                        i = -EFAULT;
        }
        return i;
}

asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
{
        struct timespec tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime(&tv, NULL);
        if (err)
                return err;

        do_settimeofday(&tv);
        return 0;
}

#endif /* __ARCH_WANT_COMPAT_SYS_TIME */

#ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize)
{
        sigset_t newset;
        compat_sigset_t newset32;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
                return -EFAULT;
        sigset_from_compat(&newset, &newset32);
        sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

        spin_lock_irq(&current->sighand->siglock);
        current->saved_sigmask = current->blocked;
        current->blocked = newset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_restore_sigmask();
        return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */

asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
{
        struct timex txc;
        int ret;

        memset(&txc, 0, sizeof(struct timex));

        if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
                        __get_user(txc.modes, &utp->modes) ||
                        __get_user(txc.offset, &utp->offset) ||
                        __get_user(txc.freq, &utp->freq) ||
                        __get_user(txc.maxerror, &utp->maxerror) ||
                        __get_user(txc.esterror, &utp->esterror) ||
                        __get_user(txc.status, &utp->status) ||
                        __get_user(txc.constant, &utp->constant) ||
                        __get_user(txc.precision, &utp->precision) ||
                        __get_user(txc.tolerance, &utp->tolerance) ||
                        __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
                        __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
                        __get_user(txc.tick, &utp->tick) ||
                        __get_user(txc.ppsfreq, &utp->ppsfreq) ||
                        __get_user(txc.jitter, &utp->jitter) ||
                        __get_user(txc.shift, &utp->shift) ||
                        __get_user(txc.stabil, &utp->stabil) ||
                        __get_user(txc.jitcnt, &utp->jitcnt) ||
                        __get_user(txc.calcnt, &utp->calcnt) ||
                        __get_user(txc.errcnt, &utp->errcnt) ||
                        __get_user(txc.stbcnt, &utp->stbcnt))
                return -EFAULT;

        ret = do_adjtimex(&txc);

        if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
                        __put_user(txc.modes, &utp->modes) ||
                        __put_user(txc.offset, &utp->offset) ||
                        __put_user(txc.freq, &utp->freq) ||
                        __put_user(txc.maxerror, &utp->maxerror) ||
                        __put_user(txc.esterror, &utp->esterror) ||
                        __put_user(txc.status, &utp->status) ||
                        __put_user(txc.constant, &utp->constant) ||
                        __put_user(txc.precision, &utp->precision) ||
                        __put_user(txc.tolerance, &utp->tolerance) ||
                        __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
                        __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
                        __put_user(txc.tick, &utp->tick) ||
                        __put_user(txc.ppsfreq, &utp->ppsfreq) ||
                        __put_user(txc.jitter, &utp->jitter) ||
                        __put_user(txc.shift, &utp->shift) ||
                        __put_user(txc.stabil, &utp->stabil) ||
                        __put_user(txc.jitcnt, &utp->jitcnt) ||
                        __put_user(txc.calcnt, &utp->calcnt) ||
                        __put_user(txc.errcnt, &utp->errcnt) ||
                        __put_user(txc.stbcnt, &utp->stbcnt) ||
                        __put_user(txc.tai, &utp->tai))
                ret = -EFAULT;

        return ret;
}

#ifdef CONFIG_NUMA
asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
                compat_uptr_t __user *pages32,
                const int __user *nodes,
                int __user *status,
                int flags)
{
        const void __user * __user *pages;
        int i;

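        /*
         * sys_move_pages() expects an array of native user pointers, so
         * expand the 32-bit pointers into a scratch array allocated with
         * compat_alloc_user_space() before calling it.
         */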
        pages = compat_alloc_user_space(nr_pages * sizeof(void *));
        for (i = 0; i < nr_pages; i++) {
                compat_uptr_t p;

                if (get_user(p, pages32 + i) ||
                        put_user(compat_ptr(p), pages + i))
                        return -EFAULT;
        }
        return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
}

asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
                        compat_ulong_t maxnode,
                        const compat_ulong_t __user *old_nodes,
                        const compat_ulong_t __user *new_nodes)
{
        unsigned long __user *old = NULL;
        unsigned long __user *new = NULL;
        nodemask_t tmp_mask;
        unsigned long nr_bits;
        unsigned long size;

        nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
        size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
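        /*
         * Each mask is unpacked into a kernel nodemask and then re-packed
         * as native longs in a single compat_alloc_user_space() area:
         * 'old' first and, when both are given, 'new' right after it.
         */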
        if (old_nodes) {
                if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
                        return -EFAULT;
                old = compat_alloc_user_space(new_nodes ? size * 2 : size);
                if (new_nodes)
                        new = old + size / sizeof(unsigned long);
                if (copy_to_user(old, nodes_addr(tmp_mask), size))
                        return -EFAULT;
        }
        if (new_nodes) {
                if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
                        return -EFAULT;
                if (new == NULL)
                        new = compat_alloc_user_space(size);
                if (copy_to_user(new, nodes_addr(tmp_mask), size))
                        return -EFAULT;
        }
        return sys_migrate_pages(pid, nr_bits + 1, old, new);
}
#endif

struct compat_sysinfo {
        s32 uptime;
        u32 loads[3];
        u32 totalram;
        u32 freeram;
        u32 sharedram;
        u32 bufferram;
        u32 totalswap;
        u32 freeswap;
        u16 procs;
        u16 pad;
        u32 totalhigh;
        u32 freehigh;
        u32 mem_unit;
        char _f[20-2*sizeof(u32)-sizeof(int)];
};

asmlinkage long
compat_sys_sysinfo(struct compat_sysinfo __user *info)
{
        struct sysinfo s;

        do_sysinfo(&s);

        /* Check to see if any memory value is too large for 32-bit and scale
         *  down if needed
         */
        if ((s.totalram >> 32) || (s.totalswap >> 32)) {
                int bitcount = 0;

                while (s.mem_unit < PAGE_SIZE) {
                        s.mem_unit <<= 1;
                        bitcount++;
                }

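                /*
                 * Each doubling of mem_unit above halves the counters below,
                 * so the byte totals are unchanged; once mem_unit reaches
                 * PAGE_SIZE the page-sized counts are expected to fit in
                 * 32 bits.
                 */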
                s.totalram >>= bitcount;
                s.freeram >>= bitcount;
                s.sharedram >>= bitcount;
                s.bufferram >>= bitcount;
                s.totalswap >>= bitcount;
                s.freeswap >>= bitcount;
                s.totalhigh >>= bitcount;
                s.freehigh >>= bitcount;
        }

        if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
            __put_user (s.uptime, &info->uptime) ||
            __put_user (s.loads[0], &info->loads[0]) ||
            __put_user (s.loads[1], &info->loads[1]) ||
            __put_user (s.loads[2], &info->loads[2]) ||
            __put_user (s.totalram, &info->totalram) ||
            __put_user (s.freeram, &info->freeram) ||
            __put_user (s.sharedram, &info->sharedram) ||
            __put_user (s.bufferram, &info->bufferram) ||
            __put_user (s.totalswap, &info->totalswap) ||
            __put_user (s.freeswap, &info->freeswap) ||
            __put_user (s.procs, &info->procs) ||
            __put_user (s.totalhigh, &info->totalhigh) ||
            __put_user (s.freehigh, &info->freehigh) ||
            __put_user (s.mem_unit, &info->mem_unit))
                return -EFAULT;

        return 0;
}

/*
 * Allocate user-space memory for the duration of a single system call,
 * in order to marshall parameters inside a compat thunk.
 */
void __user *compat_alloc_user_space(unsigned long len)
{
        void __user *ptr;

        /* If len would occupy more than half of the entire compat space... */
        if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
                return NULL;

        ptr = arch_compat_alloc_user_space(len);

        if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
                return NULL;

        return ptr;
}
EXPORT_SYMBOL_GPL(compat_alloc_user_space);
