linux/arch/ia64/ia32/sys_ia32.c
   1/*
   2 * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c.
   3 *
   4 * Copyright (C) 2000           VA Linux Co
   5 * Copyright (C) 2000           Don Dugger <n0ano@valinux.com>
   6 * Copyright (C) 1999           Arun Sharma <arun.sharma@intel.com>
   7 * Copyright (C) 1997,1998      Jakub Jelinek (jj@sunsite.mff.cuni.cz)
   8 * Copyright (C) 1997           David S. Miller (davem@caip.rutgers.edu)
   9 * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co
  10 *      David Mosberger-Tang <davidm@hpl.hp.com>
  11 * Copyright (C) 2004           Gordon Jin <gordon.jin@intel.com>
  12 *
   13 * These routines handle argument conversion between the 32-bit and 64-bit
   14 * environments.
  15 */
  16
  17#include <linux/kernel.h>
  18#include <linux/syscalls.h>
  19#include <linux/sysctl.h>
  20#include <linux/sched.h>
  21#include <linux/fs.h>
  22#include <linux/file.h>
  23#include <linux/signal.h>
  24#include <linux/resource.h>
  25#include <linux/times.h>
  26#include <linux/utsname.h>
  27#include <linux/smp.h>
  28#include <linux/smp_lock.h>
  29#include <linux/sem.h>
  30#include <linux/msg.h>
  31#include <linux/mm.h>
  32#include <linux/shm.h>
  33#include <linux/slab.h>
  34#include <linux/uio.h>
  35#include <linux/socket.h>
  36#include <linux/quota.h>
  37#include <linux/poll.h>
  38#include <linux/eventpoll.h>
  39#include <linux/personality.h>
  40#include <linux/ptrace.h>
  41#include <linux/regset.h>
  42#include <linux/stat.h>
  43#include <linux/ipc.h>
  44#include <linux/capability.h>
  45#include <linux/compat.h>
  46#include <linux/vfs.h>
  47#include <linux/mman.h>
  48#include <linux/mutex.h>
  49
  50#include <asm/intrinsics.h>
  51#include <asm/types.h>
  52#include <asm/uaccess.h>
  53#include <asm/unistd.h>
  54
  55#include "ia32priv.h"
  56
  57#include <net/scm.h>
  58#include <net/sock.h>
  59
  60#define DEBUG   0
  61
  62#if DEBUG
  63# define DBG(fmt...)    printk(KERN_DEBUG fmt)
  64#else
  65# define DBG(fmt...)
  66#endif
  67
  68#define ROUND_UP(x,a)   ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))
  69
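/*
 * OFFSET4K() is the offset within an IA-32 4KB page; a non-zero result
 * means the address is not IA-32 page aligned.  PAGE_START() masks an
 * address down to the start of the native IA-64 page containing it.
 */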
  70#define OFFSET4K(a)             ((a) & 0xfff)
  71#define PAGE_START(addr)        ((addr) & PAGE_MASK)
  72#define MINSIGSTKSZ_IA32        2048
  73
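/*
 * The legacy 16-bit uid/gid interface cannot represent ids above 65535;
 * such ids are reported as the conventional overflow id 65534 instead
 * of being silently truncated.
 */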
  74#define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))
  75#define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid))
  76
  77/*
   78 * Anything that modifies or inspects ia32 user virtual memory must hold this mutex
  79 * while doing so.
  80 */
  81/* XXX make per-mm: */
  82static DEFINE_MUTEX(ia32_mmap_mutex);
  83
  84asmlinkage long
  85sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
  86              struct pt_regs *regs)
  87{
  88        long error;
  89        char *filename;
  90        unsigned long old_map_base, old_task_size, tssd;
  91
  92        filename = getname(name);
  93        error = PTR_ERR(filename);
  94        if (IS_ERR(filename))
  95                return error;
  96
  97        old_map_base  = current->thread.map_base;
  98        old_task_size = current->thread.task_size;
  99        tssd = ia64_get_kr(IA64_KR_TSSD);
 100
 101        /* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */
 102        current->thread.map_base  = DEFAULT_MAP_BASE;
 103        current->thread.task_size = DEFAULT_TASK_SIZE;
 104        ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
 105        ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
 106
 107        error = compat_do_execve(filename, argv, envp, regs);
 108        putname(filename);
 109
 110        if (error < 0) {
 111                /* oops, execve failed, switch back to old values... */
 112                ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
 113                ia64_set_kr(IA64_KR_TSSD, tssd);
 114                current->thread.map_base  = old_map_base;
 115                current->thread.task_size = old_task_size;
 116        }
 117
 118        return error;
 119}
 120
 121int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
 122{
 123        compat_ino_t ino;
 124        int err;
 125
 126        if ((u64) stat->size > MAX_NON_LFS ||
 127            !old_valid_dev(stat->dev) ||
 128            !old_valid_dev(stat->rdev))
 129                return -EOVERFLOW;
 130
 131        ino = stat->ino;
 132        if (sizeof(ino) < sizeof(stat->ino) && ino != stat->ino)
 133                return -EOVERFLOW;
 134
 135        if (clear_user(ubuf, sizeof(*ubuf)))
 136                return -EFAULT;
 137
 138        err  = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev);
 139        err |= __put_user(ino, &ubuf->st_ino);
 140        err |= __put_user(stat->mode, &ubuf->st_mode);
 141        err |= __put_user(stat->nlink, &ubuf->st_nlink);
 142        err |= __put_user(high2lowuid(stat->uid), &ubuf->st_uid);
 143        err |= __put_user(high2lowgid(stat->gid), &ubuf->st_gid);
 144        err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev);
 145        err |= __put_user(stat->size, &ubuf->st_size);
 146        err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime);
 147        err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec);
 148        err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime);
 149        err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec);
 150        err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime);
 151        err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec);
 152        err |= __put_user(stat->blksize, &ubuf->st_blksize);
 153        err |= __put_user(stat->blocks, &ubuf->st_blocks);
 154        return err;
 155}
 156
 157#if PAGE_SHIFT > IA32_PAGE_SHIFT
 158
 159
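/*
 * Everything below (up to the matching #endif) emulates IA-32 mmap
 * semantics when the native page size is larger than the IA-32 page
 * size of 4KB.  A native page whose 4KB pieces are not all in use is
 * tracked as an "ia64_partial_page": a bitmap records which IA-32 sized
 * pieces of the native page are mapped, and the entries are kept both
 * in a singly-linked list and in an rbtree keyed by base address.
 */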
 160static int
 161get_page_prot (struct vm_area_struct *vma, unsigned long addr)
 162{
 163        int prot = 0;
 164
 165        if (!vma || vma->vm_start > addr)
 166                return 0;
 167
 168        if (vma->vm_flags & VM_READ)
 169                prot |= PROT_READ;
 170        if (vma->vm_flags & VM_WRITE)
 171                prot |= PROT_WRITE;
 172        if (vma->vm_flags & VM_EXEC)
 173                prot |= PROT_EXEC;
 174        return prot;
 175}
 176
 177/*
 178 * Map a subpage by creating an anonymous page that contains the union of the old page and
 179 * the subpage.
 180 */
 181static unsigned long
 182mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
 183              loff_t off)
 184{
 185        void *page = NULL;
 186        struct inode *inode;
 187        unsigned long ret = 0;
 188        struct vm_area_struct *vma = find_vma(current->mm, start);
 189        int old_prot = get_page_prot(vma, start);
 190
 191        DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
 192            file, start, end, prot, flags, off);
 193
 194
 195        /* Optimize the case where the old mmap and the new mmap are both anonymous */
 196        if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
 197                if (clear_user((void __user *) start, end - start)) {
 198                        ret = -EFAULT;
 199                        goto out;
 200                }
 201                goto skip_mmap;
 202        }
 203
 204        page = (void *) get_zeroed_page(GFP_KERNEL);
 205        if (!page)
 206                return -ENOMEM;
 207
 208        if (old_prot)
 209                copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE);
 210
 211        down_write(&current->mm->mmap_sem);
 212        {
 213                ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
 214                              flags | MAP_FIXED | MAP_ANONYMOUS, 0);
 215        }
 216        up_write(&current->mm->mmap_sem);
 217
 218        if (IS_ERR((void *) ret))
 219                goto out;
 220
 221        if (old_prot) {
 222                /* copy back the old page contents.  */
 223                if (offset_in_page(start))
 224                        copy_to_user((void __user *) PAGE_START(start), page,
 225                                     offset_in_page(start));
 226                if (offset_in_page(end))
 227                        copy_to_user((void __user *) end, page + offset_in_page(end),
 228                                     PAGE_SIZE - offset_in_page(end));
 229        }
 230
 231        if (!(flags & MAP_ANONYMOUS)) {
 232                /* read the file contents */
 233                inode = file->f_path.dentry->d_inode;
 234                if (!inode->i_fop || !file->f_op->read
 235                    || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0))
 236                {
 237                        ret = -EINVAL;
 238                        goto out;
 239                }
 240        }
 241
 242 skip_mmap:
 243        if (!(prot & PROT_WRITE))
 244                ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
 245  out:
 246        if (page)
 247                free_page((unsigned long) page);
 248        return ret;
 249}
 250
 251/* SLAB cache for ia64_partial_page structures */
 252struct kmem_cache *ia64_partial_page_cachep;
 253
 254/*
 255 * init ia64_partial_page_list.
  256 * Returns NULL if the kmalloc fails.
 257 */
 258struct ia64_partial_page_list*
 259ia32_init_pp_list(void)
 260{
 261        struct ia64_partial_page_list *p;
 262
 263        if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
 264                return p;
 265        p->pp_head = NULL;
 266        p->ppl_rb = RB_ROOT;
 267        p->pp_hint = NULL;
 268        atomic_set(&p->pp_count, 1);
 269        return p;
 270}
 271
 272/*
 273 * Search for the partial page with @start in partial page list @ppl.
  274 * If the partial page is found, return it.
  275 * Otherwise, return NULL and set @pprev, @rb_link and @rb_parent for
  276 * use by a later __ia32_insert_pp().
 277 */
 278static struct ia64_partial_page *
 279__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
 280        struct ia64_partial_page **pprev, struct rb_node ***rb_link,
 281        struct rb_node **rb_parent)
 282{
 283        struct ia64_partial_page *pp;
 284        struct rb_node **__rb_link, *__rb_parent, *rb_prev;
 285
 286        pp = ppl->pp_hint;
 287        if (pp && pp->base == start)
 288                return pp;
 289
 290        __rb_link = &ppl->ppl_rb.rb_node;
 291        rb_prev = __rb_parent = NULL;
 292
 293        while (*__rb_link) {
 294                __rb_parent = *__rb_link;
 295                pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);
 296
 297                if (pp->base == start) {
 298                        ppl->pp_hint = pp;
 299                        return pp;
 300                } else if (pp->base < start) {
 301                        rb_prev = __rb_parent;
 302                        __rb_link = &__rb_parent->rb_right;
 303                } else {
 304                        __rb_link = &__rb_parent->rb_left;
 305                }
 306        }
 307
 308        *rb_link = __rb_link;
 309        *rb_parent = __rb_parent;
 310        *pprev = NULL;
 311        if (rb_prev)
 312                *pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
 313        return NULL;
 314}
 315
 316/*
 317 * insert @pp into @ppl.
 318 */
 319static void
 320__ia32_insert_pp(struct ia64_partial_page_list *ppl,
 321        struct ia64_partial_page *pp, struct ia64_partial_page *prev,
 322        struct rb_node **rb_link, struct rb_node *rb_parent)
 323{
 324        /* link list */
 325        if (prev) {
 326                pp->next = prev->next;
 327                prev->next = pp;
 328        } else {
 329                ppl->pp_head = pp;
 330                if (rb_parent)
 331                        pp->next = rb_entry(rb_parent,
 332                                struct ia64_partial_page, pp_rb);
 333                else
 334                        pp->next = NULL;
 335        }
 336
 337        /* link rb */
 338        rb_link_node(&pp->pp_rb, rb_parent, rb_link);
 339        rb_insert_color(&pp->pp_rb, &ppl->ppl_rb);
 340
 341        ppl->pp_hint = pp;
 342}
 343
 344/*
 345 * delete @pp from partial page list @ppl.
 346 */
 347static void
 348__ia32_delete_pp(struct ia64_partial_page_list *ppl,
 349        struct ia64_partial_page *pp, struct ia64_partial_page *prev)
 350{
 351        if (prev) {
 352                prev->next = pp->next;
 353                if (ppl->pp_hint == pp)
 354                        ppl->pp_hint = prev;
 355        } else {
 356                ppl->pp_head = pp->next;
 357                if (ppl->pp_hint == pp)
 358                        ppl->pp_hint = pp->next;
 359        }
 360        rb_erase(&pp->pp_rb, &ppl->ppl_rb);
 361        kmem_cache_free(ia64_partial_page_cachep, pp);
 362}
 363
 364static struct ia64_partial_page *
 365__pp_prev(struct ia64_partial_page *pp)
 366{
 367        struct rb_node *prev = rb_prev(&pp->pp_rb);
 368        if (prev)
 369                return rb_entry(prev, struct ia64_partial_page, pp_rb);
 370        else
 371                return NULL;
 372}
 373
 374/*
 375 * Delete partial pages with address between @start and @end.
 376 * @start and @end are page aligned.
 377 */
 378static void
 379__ia32_delete_pp_range(unsigned int start, unsigned int end)
 380{
 381        struct ia64_partial_page *pp, *prev;
 382        struct rb_node **rb_link, *rb_parent;
 383
 384        if (start >= end)
 385                return;
 386
 387        pp = __ia32_find_pp(current->thread.ppl, start, &prev,
 388                                        &rb_link, &rb_parent);
 389        if (pp)
 390                prev = __pp_prev(pp);
 391        else {
 392                if (prev)
 393                        pp = prev->next;
 394                else
 395                        pp = current->thread.ppl->pp_head;
 396        }
 397
 398        while (pp && pp->base < end) {
 399                struct ia64_partial_page *tmp = pp->next;
 400                __ia32_delete_pp(current->thread.ppl, pp, prev);
 401                pp = tmp;
 402        }
 403}
 404
 405/*
  406 * Set the range between @start and @end in the partial page's bitmap.
 407 * @start and @end should be IA32 page aligned and in the same IA64 page.
 408 */
 409static int
 410__ia32_set_pp(unsigned int start, unsigned int end, int flags)
 411{
 412        struct ia64_partial_page *pp, *prev;
 413        struct rb_node ** rb_link, *rb_parent;
 414        unsigned int pstart, start_bit, end_bit, i;
 415
 416        pstart = PAGE_START(start);
 417        start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
 418        end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
 419        if (end_bit == 0)
 420                end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
 421        pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
 422                                        &rb_link, &rb_parent);
 423        if (pp) {
 424                for (i = start_bit; i < end_bit; i++)
 425                        set_bit(i, &pp->bitmap);
 426                /*
 427                 * Check: if this partial page has been set to a full page,
 428                 * then delete it.
 429                 */
 430                if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >=
 431                                PAGE_SIZE/IA32_PAGE_SIZE) {
 432                        __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
 433                }
 434                return 0;
 435        }
 436
 437        /*
 438         * MAP_FIXED may lead to overlapping mmap.
  439         * In this case, the requested mmap area may already be mapped as a
  440         * full page. So check the vma before adding a new partial page.
 441         */
 442        if (flags & MAP_FIXED) {
 443                struct vm_area_struct *vma = find_vma(current->mm, pstart);
 444                if (vma && vma->vm_start <= pstart)
 445                        return 0;
 446        }
 447
  448        /* allocate a new ia64_partial_page */
 449        pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 450        if (!pp)
 451                return -ENOMEM;
 452        pp->base = pstart;
 453        pp->bitmap = 0;
  454        for (i = start_bit; i < end_bit; i++)
 455                set_bit(i, &(pp->bitmap));
 456        pp->next = NULL;
 457        __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
 458        return 0;
 459}
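/*
 * Example, assuming 16KB native pages: __ia32_set_pp(0x2000, 0x4000, 0)
 * yields pstart = 0, start_bit = 2 and end_bit = 0, which is corrected
 * to 4, so bits 2 and 3 of the partial page at base 0 are set to mark
 * those two 4KB pieces as mapped.
 */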
 460
 461/*
 462 * @start and @end should be IA32 page aligned, but don't need to be in the
 463 * same IA64 page. Split @start and @end to make sure they're in the same IA64
 464 * page, then call __ia32_set_pp().
 465 */
 466static void
 467ia32_set_pp(unsigned int start, unsigned int end, int flags)
 468{
 469        down_write(&current->mm->mmap_sem);
 470        if (flags & MAP_FIXED) {
 471                /*
  472                 * MAP_FIXED may lead to overlapping mmaps. When this happens,
  473                 * the new mapping may fully cover some IA64 pages; the stale
  474                 * partial pages in that range must be deleted.
 475                 */
 476                __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
 477        }
 478
 479        if (end < PAGE_ALIGN(start)) {
 480                __ia32_set_pp(start, end, flags);
 481        } else {
 482                if (offset_in_page(start))
 483                        __ia32_set_pp(start, PAGE_ALIGN(start), flags);
 484                if (offset_in_page(end))
 485                        __ia32_set_pp(PAGE_START(end), end, flags);
 486        }
 487        up_write(&current->mm->mmap_sem);
 488}
 489
 490/*
  491 * Unset the range between @start and @end in the partial page's bitmap.
  492 * @start and @end should be IA32 page aligned and in the same IA64 page.
  493 * After doing that, if the bitmap is 0, free the partial page and
  494 *      return 1; else return 0.
  495 * If the partial page is not found in the list, then:
  496 *      if a vma covers the page, turn the full page into a partial page;
  497 *      else return -ENOMEM.
 498 */
 499static int
 500__ia32_unset_pp(unsigned int start, unsigned int end)
 501{
 502        struct ia64_partial_page *pp, *prev;
 503        struct rb_node ** rb_link, *rb_parent;
 504        unsigned int pstart, start_bit, end_bit, i;
 505        struct vm_area_struct *vma;
 506
 507        pstart = PAGE_START(start);
 508        start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
 509        end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
 510        if (end_bit == 0)
 511                end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
 512
 513        pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
 514                                        &rb_link, &rb_parent);
 515        if (pp) {
 516                for (i = start_bit; i < end_bit; i++)
 517                        clear_bit(i, &pp->bitmap);
 518                if (pp->bitmap == 0) {
 519                        __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
 520                        return 1;
 521                }
 522                return 0;
 523        }
 524
 525        vma = find_vma(current->mm, pstart);
 526        if (!vma || vma->vm_start > pstart) {
 527                return -ENOMEM;
 528        }
 529
  530        /* allocate a new ia64_partial_page */
 531        pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 532        if (!pp)
 533                return -ENOMEM;
 534        pp->base = pstart;
 535        pp->bitmap = 0;
 536        for (i = 0; i < start_bit; i++)
 537                set_bit(i, &(pp->bitmap));
 538        for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++)
 539                set_bit(i, &(pp->bitmap));
 540        pp->next = NULL;
 541        __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
 542        return 0;
 543}
 544
 545/*
 546 * Delete pp between PAGE_ALIGN(start) and PAGE_START(end) by calling
 547 * __ia32_delete_pp_range(). Unset possible partial pages by calling
 548 * __ia32_unset_pp().
  549 * For the return value, see __ia32_unset_pp().
 550 */
 551static int
 552ia32_unset_pp(unsigned int *startp, unsigned int *endp)
 553{
 554        unsigned int start = *startp, end = *endp;
 555        int ret = 0;
 556
 557        down_write(&current->mm->mmap_sem);
 558
 559        __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
 560
 561        if (end < PAGE_ALIGN(start)) {
 562                ret = __ia32_unset_pp(start, end);
 563                if (ret == 1) {
 564                        *startp = PAGE_START(start);
 565                        *endp = PAGE_ALIGN(end);
 566                }
 567                if (ret == 0) {
 568                        /* to shortcut sys_munmap() in sys32_munmap() */
 569                        *startp = PAGE_START(start);
 570                        *endp = PAGE_START(end);
 571                }
 572        } else {
 573                if (offset_in_page(start)) {
 574                        ret = __ia32_unset_pp(start, PAGE_ALIGN(start));
 575                        if (ret == 1)
 576                                *startp = PAGE_START(start);
 577                        if (ret == 0)
 578                                *startp = PAGE_ALIGN(start);
 579                        if (ret < 0)
 580                                goto out;
 581                }
 582                if (offset_in_page(end)) {
 583                        ret = __ia32_unset_pp(PAGE_START(end), end);
 584                        if (ret == 1)
 585                                *endp = PAGE_ALIGN(end);
 586                        if (ret == 0)
 587                                *endp = PAGE_START(end);
 588                }
 589        }
 590
 591 out:
 592        up_write(&current->mm->mmap_sem);
 593        return ret;
 594}
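/*
 * Note the *startp/*endp adjustments above: on return, [*startp, *endp)
 * is native-page aligned.  It is widened when the partial pages at the
 * edges became completely free (ret == 1) and narrowed to the whole
 * native pages in between otherwise, so the caller can hand the range
 * straight to sys_munmap().
 */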
 595
 596/*
  597 * Compare the range between @start and @end with the partial page's bitmap.
 598 * @start and @end should be IA32 page aligned and in the same IA64 page.
 599 */
 600static int
 601__ia32_compare_pp(unsigned int start, unsigned int end)
 602{
 603        struct ia64_partial_page *pp, *prev;
 604        struct rb_node ** rb_link, *rb_parent;
 605        unsigned int pstart, start_bit, end_bit, size;
 606        unsigned int first_bit, next_zero_bit;  /* the first range in bitmap */
 607
 608        pstart = PAGE_START(start);
 609
 610        pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
 611                                        &rb_link, &rb_parent);
 612        if (!pp)
 613                return 1;
 614
 615        start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
 616        end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
 617        size = sizeof(pp->bitmap) * 8;
 618        first_bit = find_first_bit(&pp->bitmap, size);
 619        next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit);
 620        if ((start_bit < first_bit) || (end_bit > next_zero_bit)) {
 621                /* exceeds the first range in bitmap */
 622                return -ENOMEM;
 623        } else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) {
 624                first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit);
 625                if ((next_zero_bit < first_bit) && (first_bit < size))
 626                        return 1;       /* has next range */
 627                else
 628                        return 0;       /* no next range */
 629        } else
 630                return 1;
 631}
 632
 633/*
 634 * @start and @end should be IA32 page aligned, but don't need to be in the
 635 * same IA64 page. Split @start and @end to make sure they're in the same IA64
 636 * page, then call __ia32_compare_pp().
 637 *
  638 * Take this as an example: the range covers the 1st and 2nd 4K pages.
  639 * Return 0 if they fit the bitmap exactly, i.e. bitmap = 00000011;
  640 * Return 1 if the range doesn't cover the whole bitmap, e.g. bitmap = 00001111;
 641 * Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or
 642 *      bitmap = 00000101.
 643 */
 644static int
 645ia32_compare_pp(unsigned int *startp, unsigned int *endp)
 646{
 647        unsigned int start = *startp, end = *endp;
 648        int retval = 0;
 649
 650        down_write(&current->mm->mmap_sem);
 651
 652        if (end < PAGE_ALIGN(start)) {
 653                retval = __ia32_compare_pp(start, end);
 654                if (retval == 0) {
 655                        *startp = PAGE_START(start);
 656                        *endp = PAGE_ALIGN(end);
 657                }
 658        } else {
 659                if (offset_in_page(start)) {
 660                        retval = __ia32_compare_pp(start,
 661                                                   PAGE_ALIGN(start));
 662                        if (retval == 0)
 663                                *startp = PAGE_START(start);
 664                        if (retval < 0)
 665                                goto out;
 666                }
 667                if (offset_in_page(end)) {
 668                        retval = __ia32_compare_pp(PAGE_START(end), end);
 669                        if (retval == 0)
 670                                *endp = PAGE_ALIGN(end);
 671                }
 672        }
 673
 674 out:
 675        up_write(&current->mm->mmap_sem);
 676        return retval;
 677}
 678
 679static void
 680__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
 681{
 682        struct ia64_partial_page *pp = ppl->pp_head;
 683
 684        while (pp) {
 685                struct ia64_partial_page *next = pp->next;
 686                kmem_cache_free(ia64_partial_page_cachep, pp);
 687                pp = next;
 688        }
 689
 690        kfree(ppl);
 691}
 692
 693void
 694ia32_drop_ia64_partial_page_list(struct task_struct *task)
 695{
 696        struct ia64_partial_page_list* ppl = task->thread.ppl;
 697
 698        if (ppl && atomic_dec_and_test(&ppl->pp_count))
 699                __ia32_drop_pp_list(ppl);
 700}
 701
 702/*
 703 * Copy current->thread.ppl to ppl (already initialized).
 704 */
 705static int
 706__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
 707{
 708        struct ia64_partial_page *pp, *tmp, *prev;
 709        struct rb_node **rb_link, *rb_parent;
 710
 711        ppl->pp_head = NULL;
 712        ppl->pp_hint = NULL;
 713        ppl->ppl_rb = RB_ROOT;
 714        rb_link = &ppl->ppl_rb.rb_node;
 715        rb_parent = NULL;
 716        prev = NULL;
 717
 718        for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
 719                tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 720                if (!tmp)
 721                        return -ENOMEM;
 722                *tmp = *pp;
 723                __ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent);
 724                prev = tmp;
 725                rb_link = &tmp->pp_rb.rb_right;
 726                rb_parent = &tmp->pp_rb;
 727        }
 728        return 0;
 729}
 730
 731int
 732ia32_copy_ia64_partial_page_list(struct task_struct *p,
 733                                unsigned long clone_flags)
 734{
 735        int retval = 0;
 736
 737        if (clone_flags & CLONE_VM) {
 738                atomic_inc(&current->thread.ppl->pp_count);
 739                p->thread.ppl = current->thread.ppl;
 740        } else {
 741                p->thread.ppl = ia32_init_pp_list();
 742                if (!p->thread.ppl)
 743                        return -ENOMEM;
 744                down_write(&current->mm->mmap_sem);
 745                {
 746                        retval = __ia32_copy_pp_list(p->thread.ppl);
 747                }
 748                up_write(&current->mm->mmap_sem);
 749        }
 750
 751        return retval;
 752}
 753
 754static unsigned long
 755emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags,
 756              loff_t off)
 757{
 758        unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0;
 759        struct inode *inode;
 760        loff_t poff;
 761
 762        end = start + len;
 763        pstart = PAGE_START(start);
 764        pend = PAGE_ALIGN(end);
 765
 766        if (flags & MAP_FIXED) {
 767                ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
 768                if (start > pstart) {
 769                        if (flags & MAP_SHARED)
 770                                printk(KERN_INFO
 771                                       "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
 772                                       current->comm, task_pid_nr(current), start);
 773                        ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
 774                                           off);
 775                        if (IS_ERR((void *) ret))
 776                                return ret;
 777                        pstart += PAGE_SIZE;
 778                        if (pstart >= pend)
 779                                goto out;       /* done */
 780                }
 781                if (end < pend) {
 782                        if (flags & MAP_SHARED)
 783                                printk(KERN_INFO
 784                                       "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
 785                                       current->comm, task_pid_nr(current), end);
 786                        ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
 787                                           (off + len) - offset_in_page(end));
 788                        if (IS_ERR((void *) ret))
 789                                return ret;
 790                        pend -= PAGE_SIZE;
 791                        if (pstart >= pend)
 792                                goto out;       /* done */
 793                }
 794        } else {
 795                /*
 796                 * If a start address was specified, use it if the entire rounded out area
 797                 * is available.
 798                 */
 799                if (start && !pstart)
 800                        fudge = 1;      /* handle case of mapping to range (0,PAGE_SIZE) */
 801                tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
 802                if (tmp != pstart) {
 803                        pstart = tmp;
 804                        start = pstart + offset_in_page(off);   /* make start congruent with off */
 805                        end = start + len;
 806                        pend = PAGE_ALIGN(end);
 807                }
 808        }
 809
 810        poff = off + (pstart - start);  /* note: (pstart - start) may be negative */
 811        is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0);
 812
 813        if ((flags & MAP_SHARED) && !is_congruent)
 814                printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
 815                       "(addr=0x%lx,off=0x%llx)\n", current->comm, task_pid_nr(current), start, off);
 816
 817        DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
 818            is_congruent ? "congruent" : "not congruent", poff);
 819
 820        down_write(&current->mm->mmap_sem);
 821        {
 822                if (!(flags & MAP_ANONYMOUS) && is_congruent)
 823                        ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
 824                else
 825                        ret = do_mmap(NULL, pstart, pend - pstart,
 826                                      prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
 827                                      flags | MAP_FIXED | MAP_ANONYMOUS, 0);
 828        }
 829        up_write(&current->mm->mmap_sem);
 830
 831        if (IS_ERR((void *) ret))
 832                return ret;
 833
 834        if (!is_congruent) {
 835                /* read the file contents */
 836                inode = file->f_path.dentry->d_inode;
 837                if (!inode->i_fop || !file->f_op->read
 838                    || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff)
 839                        < 0))
 840                {
 841                        sys_munmap(pstart, pend - pstart);
 842                        return -EINVAL;
 843                }
 844                if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
 845                        return -EINVAL;
 846        }
 847
 848        if (!(flags & MAP_FIXED))
 849                ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
 850out:
 851        return start;
 852}
 853
 854#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
 855
 856static inline unsigned int
 857get_prot32 (unsigned int prot)
 858{
 859        if (prot & PROT_WRITE)
  860                /* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */
 861                prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
 862        else if (prot & (PROT_READ | PROT_EXEC))
 863                /* on x86, there is no distinction between PROT_READ and PROT_EXEC */
 864                prot |= (PROT_READ | PROT_EXEC);
 865
 866        return prot;
 867}
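/*
 * E.g. an IA-32 mprotect(PROT_WRITE) request is widened here to
 * PROT_READ|PROT_WRITE|PROT_EXEC, matching what i386 page tables can
 * actually express.
 */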
 868
 869unsigned long
 870ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
 871              loff_t offset)
 872{
 873        DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
 874            file, addr, len, prot, flags, offset);
 875
 876        if (file && (!file->f_op || !file->f_op->mmap))
 877                return -ENODEV;
 878
 879        len = IA32_PAGE_ALIGN(len);
 880        if (len == 0)
 881                return addr;
 882
 883        if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
 884        {
 885                if (flags & MAP_FIXED)
 886                        return -ENOMEM;
  887                else
  888                        return -EINVAL;
 889        }
 890
 891        if (OFFSET4K(offset))
 892                return -EINVAL;
 893
 894        prot = get_prot32(prot);
 895
 896#if PAGE_SHIFT > IA32_PAGE_SHIFT
 897        mutex_lock(&ia32_mmap_mutex);
 898        {
 899                addr = emulate_mmap(file, addr, len, prot, flags, offset);
 900        }
 901        mutex_unlock(&ia32_mmap_mutex);
 902#else
 903        down_write(&current->mm->mmap_sem);
 904        {
 905                addr = do_mmap(file, addr, len, prot, flags, offset);
 906        }
 907        up_write(&current->mm->mmap_sem);
 908#endif
 909        DBG("ia32_do_mmap: returning 0x%lx\n", addr);
 910        return addr;
 911}
 912
 913/*
  914 * Linux/i386 originally couldn't handle more than 4 system call parameters, so these
  915 * system calls used a memory block for parameter passing.
 916 */
 917
 918struct mmap_arg_struct {
 919        unsigned int addr;
 920        unsigned int len;
 921        unsigned int prot;
 922        unsigned int flags;
 923        unsigned int fd;
 924        unsigned int offset;
 925};
 926
 927asmlinkage long
 928sys32_mmap (struct mmap_arg_struct __user *arg)
 929{
 930        struct mmap_arg_struct a;
 931        struct file *file = NULL;
 932        unsigned long addr;
 933        int flags;
 934
 935        if (copy_from_user(&a, arg, sizeof(a)))
 936                return -EFAULT;
 937
 938        if (OFFSET4K(a.offset))
 939                return -EINVAL;
 940
 941        flags = a.flags;
 942
 943        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 944        if (!(flags & MAP_ANONYMOUS)) {
 945                file = fget(a.fd);
 946                if (!file)
 947                        return -EBADF;
 948        }
 949
 950        addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset);
 951
 952        if (file)
 953                fput(file);
 954        return addr;
 955}
 956
 957asmlinkage long
 958sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags,
 959             unsigned int fd, unsigned int pgoff)
 960{
 961        struct file *file = NULL;
 962        unsigned long retval;
 963
 964        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 965        if (!(flags & MAP_ANONYMOUS)) {
 966                file = fget(fd);
 967                if (!file)
 968                        return -EBADF;
 969        }
 970
 971        retval = ia32_do_mmap(file, addr, len, prot, flags,
 972                              (unsigned long) pgoff << IA32_PAGE_SHIFT);
 973
 974        if (file)
 975                fput(file);
 976        return retval;
 977}
 978
 979asmlinkage long
 980sys32_munmap (unsigned int start, unsigned int len)
 981{
 982        unsigned int end = start + len;
 983        long ret;
 984
 985#if PAGE_SHIFT <= IA32_PAGE_SHIFT
 986        ret = sys_munmap(start, end - start);
 987#else
 988        if (OFFSET4K(start))
 989                return -EINVAL;
 990
 991        end = IA32_PAGE_ALIGN(end);
 992        if (start >= end)
 993                return -EINVAL;
 994
 995        ret = ia32_unset_pp(&start, &end);
 996        if (ret < 0)
 997                return ret;
 998
 999        if (start >= end)
1000                return 0;
1001
1002        mutex_lock(&ia32_mmap_mutex);
1003        ret = sys_munmap(start, end - start);
1004        mutex_unlock(&ia32_mmap_mutex);
1005#endif
1006        return ret;
1007}
1008
1009#if PAGE_SHIFT > IA32_PAGE_SHIFT
1010
1011/*
1012 * When mprotect()ing a partial page, we set the permission to the union of the old
1013 * settings and the new settings.  In other words, it's only possible to make access to a
1014 * partial page less restrictive.
1015 */
1016static long
1017mprotect_subpage (unsigned long address, int new_prot)
1018{
1019        int old_prot;
1020        struct vm_area_struct *vma;
1021
1022        if (new_prot == PROT_NONE)
1023                return 0;               /* optimize case where nothing changes... */
1024        vma = find_vma(current->mm, address);
1025        old_prot = get_page_prot(vma, address);
1026        return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
1027}
1028
1029#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
1030
1031asmlinkage long
1032sys32_mprotect (unsigned int start, unsigned int len, int prot)
1033{
1034        unsigned int end = start + len;
1035#if PAGE_SHIFT > IA32_PAGE_SHIFT
1036        long retval = 0;
1037#endif
1038
1039        prot = get_prot32(prot);
1040
1041#if PAGE_SHIFT <= IA32_PAGE_SHIFT
1042        return sys_mprotect(start, end - start, prot);
1043#else
1044        if (OFFSET4K(start))
1045                return -EINVAL;
1046
1047        end = IA32_PAGE_ALIGN(end);
1048        if (end < start)
1049                return -EINVAL;
1050
1051        retval = ia32_compare_pp(&start, &end);
1052
1053        if (retval < 0)
1054                return retval;
1055
1056        mutex_lock(&ia32_mmap_mutex);
1057        {
1058                if (offset_in_page(start)) {
1059                        /* start address is 4KB aligned but not page aligned. */
1060                        retval = mprotect_subpage(PAGE_START(start), prot);
1061                        if (retval < 0)
1062                                goto out;
1063
1064                        start = PAGE_ALIGN(start);
1065                        if (start >= end)
1066                                goto out;       /* retval is already zero... */
1067                }
1068
1069                if (offset_in_page(end)) {
1070                        /* end address is 4KB aligned but not page aligned. */
1071                        retval = mprotect_subpage(PAGE_START(end), prot);
1072                        if (retval < 0)
1073                                goto out;
1074
1075                        end = PAGE_START(end);
1076                }
1077                retval = sys_mprotect(start, end - start, prot);
1078        }
1079  out:
1080        mutex_unlock(&ia32_mmap_mutex);
1081        return retval;
1082#endif
1083}
1084
1085asmlinkage long
1086sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
1087                unsigned int flags, unsigned int new_addr)
1088{
1089        long ret;
1090
1091#if PAGE_SHIFT <= IA32_PAGE_SHIFT
1092        ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
1093#else
1094        unsigned int old_end, new_end;
1095
1096        if (OFFSET4K(addr))
1097                return -EINVAL;
1098
1099        old_len = IA32_PAGE_ALIGN(old_len);
1100        new_len = IA32_PAGE_ALIGN(new_len);
1101        old_end = addr + old_len;
1102        new_end = addr + new_len;
1103
1104        if (!new_len)
1105                return -EINVAL;
1106
1107        if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr)))
1108                return -EINVAL;
1109
1110        if (old_len >= new_len) {
1111                ret = sys32_munmap(addr + new_len, old_len - new_len);
1112                if (ret && old_len != new_len)
1113                        return ret;
1114                ret = addr;
1115                if (!(flags & MREMAP_FIXED) || (new_addr == addr))
1116                        return ret;
1117                old_len = new_len;
1118        }
1119
1120        addr = PAGE_START(addr);
1121        old_len = PAGE_ALIGN(old_end) - addr;
1122        new_len = PAGE_ALIGN(new_end) - addr;
1123
1124        mutex_lock(&ia32_mmap_mutex);
1125        ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
1126        mutex_unlock(&ia32_mmap_mutex);
1127
1128        if ((ret >= 0) && (old_len < new_len)) {
1129                /* mremap expanded successfully */
1130                ia32_set_pp(old_end, new_end, flags);
1131        }
1132#endif
1133        return ret;
1134}
1135
1136asmlinkage long
1137sys32_pipe (int __user *fd)
1138{
1139        int retval;
1140        int fds[2];
1141
1142        retval = do_pipe_flags(fds, 0);
1143        if (retval)
1144                goto out;
1145        if (copy_to_user(fd, fds, sizeof(fds)))
1146                retval = -EFAULT;
1147  out:
1148        return retval;
1149}
1150
1151static inline long
1152get_tv32 (struct timeval *o, struct compat_timeval __user *i)
1153{
1154        return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
1155                (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec)));
1156}
1157
1158static inline long
1159put_tv32 (struct compat_timeval __user *o, struct timeval *i)
1160{
1161        return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
1162                (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec)));
1163}
1164
1165asmlinkage unsigned long
1166sys32_alarm (unsigned int seconds)
1167{
1168        return alarm_setitimer(seconds);
1169}
1170
 1171/* Translations due to time_t size differences, which affect all
 1172   sorts of things, like timeval and itimerval.  */
1173
1174extern struct timezone sys_tz;
1175
1176asmlinkage long
1177sys32_gettimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
1178{
1179        if (tv) {
1180                struct timeval ktv;
1181                do_gettimeofday(&ktv);
1182                if (put_tv32(tv, &ktv))
1183                        return -EFAULT;
1184        }
1185        if (tz) {
1186                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
1187                        return -EFAULT;
1188        }
1189        return 0;
1190}
1191
1192asmlinkage long
1193sys32_settimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
1194{
1195        struct timeval ktv;
1196        struct timespec kts;
1197        struct timezone ktz;
1198
1199        if (tv) {
1200                if (get_tv32(&ktv, tv))
1201                        return -EFAULT;
1202                kts.tv_sec = ktv.tv_sec;
1203                kts.tv_nsec = ktv.tv_usec * 1000;
1204        }
1205        if (tz) {
1206                if (copy_from_user(&ktz, tz, sizeof(ktz)))
1207                        return -EFAULT;
1208        }
1209
1210        return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
1211}
1212
1213struct sel_arg_struct {
1214        unsigned int n;
1215        unsigned int inp;
1216        unsigned int outp;
1217        unsigned int exp;
1218        unsigned int tvp;
1219};
1220
1221asmlinkage long
1222sys32_old_select (struct sel_arg_struct __user *arg)
1223{
1224        struct sel_arg_struct a;
1225
1226        if (copy_from_user(&a, arg, sizeof(a)))
1227                return -EFAULT;
1228        return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
1229                                 compat_ptr(a.exp), compat_ptr(a.tvp));
1230}
1231
1232#define SEMOP            1
1233#define SEMGET           2
1234#define SEMCTL           3
1235#define SEMTIMEDOP       4
1236#define MSGSND          11
1237#define MSGRCV          12
1238#define MSGGET          13
1239#define MSGCTL          14
1240#define SHMAT           21
1241#define SHMDT           22
1242#define SHMGET          23
1243#define SHMCTL          24
1244
1245asmlinkage long
1246sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
1247{
1248        int version;
1249
1250        version = call >> 16; /* hack for backward compatibility */
1251        call &= 0xffff;
1252
1253        switch (call) {
1254              case SEMTIMEDOP:
1255                if (fifth)
1256                        return compat_sys_semtimedop(first, compat_ptr(ptr),
1257                                second, compat_ptr(fifth));
1258                /* else fall through for normal semop() */
1259              case SEMOP:
1260                /* struct sembuf is the same on 32 and 64bit :)) */
1261                return sys_semtimedop(first, compat_ptr(ptr), second,
1262                                      NULL);
1263              case SEMGET:
1264                return sys_semget(first, second, third);
1265              case SEMCTL:
1266                return compat_sys_semctl(first, second, third, compat_ptr(ptr));
1267
1268              case MSGSND:
1269                return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
1270              case MSGRCV:
1271                return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr));
1272              case MSGGET:
1273                return sys_msgget((key_t) first, second);
1274              case MSGCTL:
1275                return compat_sys_msgctl(first, second, compat_ptr(ptr));
1276
1277              case SHMAT:
1278                return compat_sys_shmat(first, second, third, version, compat_ptr(ptr));
1279                break;
1280              case SHMDT:
1281                return sys_shmdt(compat_ptr(ptr));
1282              case SHMGET:
1283                return sys_shmget(first, (unsigned)second, third);
1284              case SHMCTL:
1285                return compat_sys_shmctl(first, second, compat_ptr(ptr));
1286
1287              default:
1288                return -ENOSYS;
1289        }
1290        return -EINVAL;
1291}
1292
1293asmlinkage long
1294compat_sys_wait4 (compat_pid_t pid, compat_uint_t * stat_addr, int options,
1295                 struct compat_rusage *ru);
1296
1297asmlinkage long
1298sys32_waitpid (int pid, unsigned int *stat_addr, int options)
1299{
1300        return compat_sys_wait4(pid, stat_addr, options, NULL);
1301}
1302
1303static unsigned int
1304ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val)
1305{
1306        size_t copied;
 1307
 1308
 1309        copied = access_process_vm(child, addr, val, sizeof(*val), 0);
 1310        return (copied != sizeof(*val)) ? -EIO : 0;
1311}
1312
1313static unsigned int
1314ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val)
1315{
1316
1317        if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
1318                return -EIO;
1319        return 0;
1320}
1321
1322/*
1323 *  The order in which registers are stored in the ptrace regs structure
1324 */
1325#define PT_EBX  0
1326#define PT_ECX  1
1327#define PT_EDX  2
1328#define PT_ESI  3
1329#define PT_EDI  4
1330#define PT_EBP  5
1331#define PT_EAX  6
1332#define PT_DS   7
1333#define PT_ES   8
1334#define PT_FS   9
1335#define PT_GS   10
1336#define PT_ORIG_EAX 11
1337#define PT_EIP  12
1338#define PT_CS   13
1339#define PT_EFL  14
1340#define PT_UESP 15
1341#define PT_SS   16
1342
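/*
 * getreg()/putreg() map the i386 ptrace slots above onto the IA-64
 * registers in which the IA-32 syscall path keeps them: eax->r8,
 * ebx->r11, ecx->r9, edx->r10, esi->r14, edi->r15, ebp->r13, esp->r12,
 * eip->cr_iip, eflags->thread.eflag; the segment registers are pinned
 * to __USER_CS/__USER_DS.
 */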
1343static unsigned int
1344getreg (struct task_struct *child, int regno)
1345{
1346        struct pt_regs *child_regs;
1347
1348        child_regs = task_pt_regs(child);
1349        switch (regno / sizeof(int)) {
1350              case PT_EBX: return child_regs->r11;
1351              case PT_ECX: return child_regs->r9;
1352              case PT_EDX: return child_regs->r10;
1353              case PT_ESI: return child_regs->r14;
1354              case PT_EDI: return child_regs->r15;
1355              case PT_EBP: return child_regs->r13;
1356              case PT_EAX: return child_regs->r8;
1357              case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */
1358              case PT_EIP: return child_regs->cr_iip;
1359              case PT_UESP: return child_regs->r12;
1360              case PT_EFL: return child->thread.eflag;
1361              case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
1362                return __USER_DS;
1363              case PT_CS: return __USER_CS;
1364              default:
1365                printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno);
1366                break;
1367        }
1368        return 0;
1369}
1370
1371static void
1372putreg (struct task_struct *child, int regno, unsigned int value)
1373{
1374        struct pt_regs *child_regs;
1375
1376        child_regs = task_pt_regs(child);
1377        switch (regno / sizeof(int)) {
1378              case PT_EBX: child_regs->r11 = value; break;
1379              case PT_ECX: child_regs->r9 = value; break;
1380              case PT_EDX: child_regs->r10 = value; break;
1381              case PT_ESI: child_regs->r14 = value; break;
1382              case PT_EDI: child_regs->r15 = value; break;
1383              case PT_EBP: child_regs->r13 = value; break;
1384              case PT_EAX: child_regs->r8 = value; break;
1385              case PT_ORIG_EAX: child_regs->r1 = value; break;
1386              case PT_EIP: child_regs->cr_iip = value; break;
1387              case PT_UESP: child_regs->r12 = value; break;
1388              case PT_EFL: child->thread.eflag = value; break;
1389              case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
1390                if (value != __USER_DS)
1391                        printk(KERN_ERR
1392                               "ia32.putreg: attempt to set invalid segment register %d = %x\n",
1393                               regno, value);
1394                break;
1395              case PT_CS:
1396                if (value != __USER_CS)
1397                        printk(KERN_ERR
 1398                               "ia32.putreg: attempt to set invalid segment register %d = %x\n",
1399                               regno, value);
1400                break;
1401              default:
1402                printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno);
1403                break;
1404        }
1405}
1406
1407static void
1408put_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
1409           struct switch_stack *swp, int tos)
1410{
1411        struct _fpreg_ia32 *f;
1412        char buf[32];
1413
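        /*
         * Carve a 16-byte aligned struct _fpreg_ia32 out of buf; the
         * register store done by ia64f2ia32f() presumably relies on
         * that alignment.
         */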
1414        f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
1415        if ((regno += tos) >= 8)
1416                regno -= 8;
1417        switch (regno) {
1418              case 0:
1419                ia64f2ia32f(f, &ptp->f8);
1420                break;
1421              case 1:
1422                ia64f2ia32f(f, &ptp->f9);
1423                break;
1424              case 2:
1425                ia64f2ia32f(f, &ptp->f10);
1426                break;
1427              case 3:
1428                ia64f2ia32f(f, &ptp->f11);
1429                break;
1430              case 4:
1431              case 5:
1432              case 6:
1433              case 7:
1434                ia64f2ia32f(f, &swp->f12 + (regno - 4));
1435                break;
1436        }
1437        copy_to_user(reg, f, sizeof(*reg));
1438}
1439
1440static void
1441get_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
1442           struct switch_stack *swp, int tos)
1443{
1444
1445        if ((regno += tos) >= 8)
1446                regno -= 8;
1447        switch (regno) {
1448              case 0:
1449                copy_from_user(&ptp->f8, reg, sizeof(*reg));
1450                break;
1451              case 1:
1452                copy_from_user(&ptp->f9, reg, sizeof(*reg));
1453                break;
1454              case 2:
1455                copy_from_user(&ptp->f10, reg, sizeof(*reg));
1456                break;
1457              case 3:
1458                copy_from_user(&ptp->f11, reg, sizeof(*reg));
1459                break;
1460              case 4:
1461              case 5:
1462              case 6:
1463              case 7:
1464                copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
1465                break;
1466        }
1467        return;
1468}
1469
1470int
1471save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
1472{
1473        struct switch_stack *swp;
1474        struct pt_regs *ptp;
1475        int i, tos;
1476
1477        if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
1478                return -EFAULT;
1479
1480        __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
1481        __put_user(tsk->thread.fsr & 0xffff, &save->swd);
1482        __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
1483        __put_user(tsk->thread.fir, &save->fip);
1484        __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
1485        __put_user(tsk->thread.fdr, &save->foo);
1486        __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);
1487
1488        /*
 1489         *  Stack frames start with 16 bytes of temp space
1490         */
1491        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
1492        ptp = task_pt_regs(tsk);
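        /* bits 11..13 of the x87 status word hold the top-of-stack index */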
1493        tos = (tsk->thread.fsr >> 11) & 7;
1494        for (i = 0; i < 8; i++)
1495                put_fpreg(i, &save->st_space[i], ptp, swp, tos);
1496        return 0;
1497}
1498
1499static int
1500restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
1501{
1502        struct switch_stack *swp;
1503        struct pt_regs *ptp;
1504        int i, tos;
1505        unsigned int fsrlo, fsrhi, num32;
1506
1507        if (!access_ok(VERIFY_READ, save, sizeof(*save)))
 1508                return -EFAULT;
1509
1510        __get_user(num32, (unsigned int __user *)&save->cwd);
1511        tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
1512        __get_user(fsrlo, (unsigned int __user *)&save->swd);
1513        __get_user(fsrhi, (unsigned int __user *)&save->twd);
1514        num32 = (fsrhi << 16) | fsrlo;
1515        tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
1516        __get_user(num32, (unsigned int __user *)&save->fip);
1517        tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
1518        __get_user(num32, (unsigned int __user *)&save->foo);
1519        tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;
1520
1521        /*
 1522         *  Stack frames start with 16 bytes of temp space
1523         */
1524        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
1525        ptp = task_pt_regs(tsk);
1526        tos = (tsk->thread.fsr >> 11) & 7;
1527        for (i = 0; i < 8; i++)
1528                get_fpreg(i, &save->st_space[i], ptp, swp, tos);
1529        return 0;
1530}
1531
1532int
1533save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
1534{
1535        struct switch_stack *swp;
1536        struct pt_regs *ptp;
1537        int i, tos;
 1538        unsigned long mxcsr = 0;
1539        unsigned long num128[2];
1540
1541        if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
1542                return -EFAULT;
1543
1544        __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
1545        __put_user(tsk->thread.fsr & 0xffff, &save->swd);
1546        __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
1547        __put_user(tsk->thread.fir, &save->fip);
1548        __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
1549        __put_user(tsk->thread.fdr, &save->foo);
1550        __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);
1551
1552        /*
1553         *  Stack frames start with 16 bytes of temp space
1554         */
1555        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
1556        ptp = task_pt_regs(tsk);
1557        tos = (tsk->thread.fsr >> 11) & 7;
1558        for (i = 0; i < 8; i++)
1559                put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
1560
1561        mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f);
1562        __put_user(mxcsr & 0xffff, &save->mxcsr);
1563        for (i = 0; i < 8; i++) {
1564                memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long));
1565                memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long));
1566                copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32));
1567        }
1568        return 0;
1569}
1570
1571static int
1572restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
1573{
1574        struct switch_stack *swp;
1575        struct pt_regs *ptp;
1576        int i, tos;
1577        unsigned int fsrlo, fsrhi, num32;
1578        int mxcsr;
1579        unsigned long num64;
1580        unsigned long num128[2];
1581
1582        if (!access_ok(VERIFY_READ, save, sizeof(*save)))
1583                return -EFAULT;
1584
1585        __get_user(num32, (unsigned int __user *)&save->cwd);
1586        tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
1587        __get_user(fsrlo, (unsigned int __user *)&save->swd);
1588        __get_user(fsrhi, (unsigned int __user *)&save->twd);
1589        num32 = (fsrhi << 16) | fsrlo;
1590        tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
1591        __get_user(num32, (unsigned int __user *)&save->fip);
1592        tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
1593        __get_user(num32, (unsigned int __user *)&save->foo);
1594        tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;
1595
1596        /*
1597         *  Stack frames start with 16 bytes of temp space
1598         */
1599        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
1600        ptp = task_pt_regs(tsk);
1601        tos = (tsk->thread.fsr >> 11) & 7;
1602        for (i = 0; i < 8; i++)
1603                get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
1604
1605        __get_user(mxcsr, (unsigned int __user *)&save->mxcsr);
1606        num64 = mxcsr & 0xff10;
1607        tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32);
1608        num64 = mxcsr & 0x3f;
1609        tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32);
1610
1611        for (i = 0; i < 8; i++) {
1612                copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32));
1613                memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long));
1614                memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long));
1615        }
1616        return 0;
1617}
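
/*
 * Editor's sketch (illustrative, not from the original source): SSE's
 * mxcsr has no dedicated slot on ia64, so it is split across the high
 * halves of fcr (control bits) and fsr (status bits).  Under that
 * assumption the two paths above amount to:
 *
 *	save:    mxcsr = ((fcr >> 32) & 0xff80) | ((fsr >> 32) & 0x3f)
 *	restore: fcr = (fcr & ~0xff1000000000UL) | ((u64)(mxcsr & 0xff10) << 32)
 *	         fsr = (fsr & ~0x3f00000000UL)   | ((u64)(mxcsr & 0x3f)   << 32)
 *
 * Note the save path keeps mask 0xff80 while the restore path keeps
 * 0xff10; that asymmetry is in the original code and is preserved here.
 */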
1618
1619asmlinkage long
1620sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
1621{
1622        struct task_struct *child;
1623        unsigned int value, tmp;
1624        long i, ret;
1625
1626        lock_kernel();
1627        if (request == PTRACE_TRACEME) {
1628                ret = ptrace_traceme();
1629                goto out;
1630        }
1631
1632        child = ptrace_get_task_struct(pid);
1633        if (IS_ERR(child)) {
1634                ret = PTR_ERR(child);
1635                goto out;
1636        }
1637
1638        if (request == PTRACE_ATTACH) {
1639                ret = sys_ptrace(request, pid, addr, data);
1640                goto out_tsk;
1641        }
1642
1643        ret = ptrace_check_attach(child, request == PTRACE_KILL);
1644        if (ret < 0)
1645                goto out_tsk;
1646
1647        switch (request) {
1648              case PTRACE_PEEKTEXT:
1649              case PTRACE_PEEKDATA:     /* read word at location addr */
1650                ret = ia32_peek(child, addr, &value);
1651                if (ret == 0)
1652                        ret = put_user(value, (unsigned int __user *) compat_ptr(data));
1653                else
1654                        ret = -EIO;
1655                goto out_tsk;
1656
1657              case PTRACE_POKETEXT:
1658              case PTRACE_POKEDATA:     /* write the word at location addr */
1659                ret = ia32_poke(child, addr, data);
1660                goto out_tsk;
1661
1662              case PTRACE_PEEKUSR:      /* read word at addr in USER area */
1663                ret = -EIO;
1664                if ((addr & 3) || addr > 17*sizeof(int))
1665                        break;
1666
1667                tmp = getreg(child, addr);
1668                if (!put_user(tmp, (unsigned int __user *) compat_ptr(data)))
1669                        ret = 0;
1670                break;
1671
1672              case PTRACE_POKEUSR:      /* write word at addr in USER area */
1673                ret = -EIO;
1674                if ((addr & 3) || addr > 17*sizeof(int))
1675                        break;
1676
1677                putreg(child, addr, data);
1678                ret = 0;
1679                break;
1680
1681              case IA32_PTRACE_GETREGS:
1682                if (!access_ok(VERIFY_WRITE, compat_ptr(data), 17*sizeof(int))) {
1683                        ret = -EIO;
1684                        break;
1685                }
1686        for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
1687                        put_user(getreg(child, i), (unsigned int __user *) compat_ptr(data));
1688                        data += sizeof(int);
1689                }
1690                ret = 0;
1691                break;
1692
1693              case IA32_PTRACE_SETREGS:
1694                if (!access_ok(VERIFY_READ, compat_ptr(data), 17*sizeof(int))) {
1695                        ret = -EIO;
1696                        break;
1697                }
1698        for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
1699                        get_user(tmp, (unsigned int __user *) compat_ptr(data));
1700                        putreg(child, i, tmp);
1701                        data += sizeof(int);
1702                }
1703                ret = 0;
1704                break;
1705
1706              case IA32_PTRACE_GETFPREGS:
1707                ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
1708                                        compat_ptr(data));
1709                break;
1710
1711              case IA32_PTRACE_GETFPXREGS:
1712                ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
1713                                         compat_ptr(data));
1714                break;
1715
1716              case IA32_PTRACE_SETFPREGS:
1717                ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
1718                                           compat_ptr(data));
1719                break;
1720
1721              case IA32_PTRACE_SETFPXREGS:
1722                ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
1723                                            compat_ptr(data));
1724                break;
1725
1726              case PTRACE_GETEVENTMSG:   
1727                ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data));
1728                break;
1729
1730              case PTRACE_SYSCALL:      /* continue, stop after next syscall */
1731              case PTRACE_CONT:         /* restart after signal. */
1732              case PTRACE_KILL:
1733              case PTRACE_SINGLESTEP:   /* execute child for one instruction */
1734              case PTRACE_DETACH:       /* detach a process */
1735                ret = sys_ptrace(request, pid, addr, data);
1736                break;
1737
1738              default:
1739                ret = ptrace_request(child, request, addr, data);
1740                break;
1741
1742        }
1743  out_tsk:
1744        put_task_struct(child);
1745  out:
1746        unlock_kernel();
1747        return ret;
1748}
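
/*
 * Editor's example (hypothetical user-space sketch, not part of this
 * file): from a 32-bit tracer's point of view the IA32_PTRACE_* cases
 * above are just the ordinary i386 PTRACE_GETREGS/PTRACE_SETREGS
 * requests.  Assuming the usual 17-int register block checked by
 * PEEKUSR/POKEUSR above:
 *
 *	unsigned int regs[17];
 *	ptrace(PTRACE_ATTACH, pid, 0, 0);
 *	waitpid(pid, &status, 0);
 *	ptrace(PTRACE_GETREGS, pid, 0, regs);	(fills all 17 slots at once)
 *	ptrace(PTRACE_SETREGS, pid, 0, regs);	(writes them back)
 *	ptrace(PTRACE_DETACH, pid, 0, 0);
 */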
1749
1750typedef struct {
1751        unsigned int    ss_sp;
1752        unsigned int    ss_flags;
1753        unsigned int    ss_size;
1754} ia32_stack_t;
1755
1756asmlinkage long
1757sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32,
1758                   long arg2, long arg3, long arg4, long arg5, long arg6,
1759                   long arg7, struct pt_regs pt)
1760{
1761        stack_t uss, uoss;
1762        ia32_stack_t buf32;
1763        int ret;
1764        mm_segment_t old_fs = get_fs();
1765
1766        if (uss32) {
1767                if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t)))
1768                        return -EFAULT;
1769                uss.ss_sp = (void __user *) (long) buf32.ss_sp;
1770                uss.ss_flags = buf32.ss_flags;
1771                /* MINSIGSTKSZ is different for ia32 vs. ia64.  We lie here to pass the
1772                   check and set it to the user-requested value later. */
1773                if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) {
1774                        ret = -ENOMEM;
1775                        goto out;
1776                }
1777                uss.ss_size = MINSIGSTKSZ;
1778        }
1779        set_fs(KERNEL_DS);
1780        ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL,
1781                             (stack_t __user *) &uoss, pt.r12);
1782        if (!ret && uss32)
                    current->sas_ss_size = buf32.ss_size;
1783        set_fs(old_fs);
1784out:
1785        if (ret < 0)
1786                return ret;
1787        if (uoss32) {
1788                buf32.ss_sp = (unsigned long) uoss.ss_sp;
1789                buf32.ss_flags = uoss.ss_flags;
1790                buf32.ss_size = uoss.ss_size;
1791                if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t)))
1792                        return -EFAULT;
1793        }
1794        return ret;
1795}
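
/*
 * Editor's example (hypothetical 32-bit caller, not part of this file):
 * the MINSIGSTKSZ fudge above matters for stacks that meet the ia32
 * minimum (2048) but not the larger native ia64 one.  Assuming such a
 * native minimum, a request like this must succeed through the compat
 * path even though do_sigaltstack() would reject the raw size:
 *
 *	stack_t ss = {
 *		.ss_sp    = buf,	(some 32-bit address)
 *		.ss_flags = 0,
 *		.ss_size  = 4096,	(>= MINSIGSTKSZ_IA32, < native minimum)
 *	};
 *	sigaltstack(&ss, NULL);
 */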
1796
1797asmlinkage int
1798sys32_pause (void)
1799{
1800        current->state = TASK_INTERRUPTIBLE;
1801        schedule();
1802        return -ERESTARTNOHAND;
1803}
1804
1805asmlinkage int
1806sys32_msync (unsigned int start, unsigned int len, int flags)
1807{
1808        unsigned int addr;
1809
1810        if (OFFSET4K(start))
1811                return -EINVAL;
1812        addr = PAGE_START(start);
1813        return sys_msync(addr, len + (start - addr), flags);
1814}
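
/*
 * Editor's note (worked example, assuming 16KB native pages): an ia32
 * caller may pass an address that is 4K-aligned but not aligned to the
 * ia64 page size, so the start is rounded down and the length grown by
 * the same amount before handing off to sys_msync():
 *
 *	start = 0x80003000	(4K-aligned, not 16K-aligned)
 *	addr  = PAGE_START(start)    = 0x80000000
 *	len'  = len + (start - addr) = len + 0x3000
 */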
1815
1816struct sysctl32 {
1817        unsigned int    name;
1818        int             nlen;
1819        unsigned int    oldval;
1820        unsigned int    oldlenp;
1821        unsigned int    newval;
1822        unsigned int    newlen;
1823        unsigned int    __unused[4];
1824};
1825
1826#ifdef CONFIG_SYSCTL_SYSCALL
1827asmlinkage long
1828sys32_sysctl (struct sysctl32 __user *args)
1829{
1830        struct sysctl32 a32;
1831        mm_segment_t old_fs = get_fs ();
1832        void __user *oldvalp, *newvalp;
1833        size_t oldlen;
1834        int __user *namep;
1835        long ret;
1836
1837        if (copy_from_user(&a32, args, sizeof(a32)))
1838                return -EFAULT;
1839
1840        /*
1841         * We need to pre-validate these because we have to disable address checking
1842         * before calling do_sysctl() because of OLDLEN but we can't run the risk of the
1843         * user specifying bad addresses here.  Well, since we're dealing with 32 bit
1844         * addresses, we KNOW that access_ok() will always succeed, so this is an
1845         * expensive NOP, but so what...
1846         */
1847        namep = (int __user *) compat_ptr(a32.name);
1848        oldvalp = compat_ptr(a32.oldval);
1849        newvalp = compat_ptr(a32.newval);
1850
1851        if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
1852            || !access_ok(VERIFY_WRITE, namep, 0)
1853            || !access_ok(VERIFY_WRITE, oldvalp, 0)
1854            || !access_ok(VERIFY_WRITE, newvalp, 0))
1855                return -EFAULT;
1856
1857        set_fs(KERNEL_DS);
1858        lock_kernel();
1859        ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen,
1860                        newvalp, (size_t) a32.newlen);
1861        unlock_kernel();
1862        set_fs(old_fs);
1863
1864        if (oldvalp && put_user (oldlen, (int __user *) compat_ptr(a32.oldlenp)))
1865                return -EFAULT;
1866
1867        return ret;
1868}
1869#endif
1870
1871asmlinkage long
1872sys32_newuname (struct new_utsname __user *name)
1873{
1874        int ret = sys_newuname(name);
1875
1876        if (!ret)
1877                if (copy_to_user(name->machine, "i686\0\0\0", 8))
1878                        ret = -EFAULT;
1879        return ret;
1880}
1881
1882asmlinkage long
1883sys32_getresuid16 (u16 __user *ruid, u16 __user *euid, u16 __user *suid)
1884{
1885        uid_t a, b, c;
1886        int ret;
1887        mm_segment_t old_fs = get_fs();
1888
1889        set_fs(KERNEL_DS);
1890        ret = sys_getresuid((uid_t __user *) &a, (uid_t __user *) &b, (uid_t __user *) &c);
1891        set_fs(old_fs);
1892
1893        if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid))
1894                return -EFAULT;
1895        return ret;
1896}
1897
1898asmlinkage long
1899sys32_getresgid16 (u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
1900{
1901        gid_t a, b, c;
1902        int ret;
1903        mm_segment_t old_fs = get_fs();
1904
1905        set_fs(KERNEL_DS);
1906        ret = sys_getresgid((gid_t __user *) &a, (gid_t __user *) &b, (gid_t __user *) &c);
1907        set_fs(old_fs);
1908
1909        if (ret)
1910                return ret;
1911
1912        return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid);
1913}
1914
1915asmlinkage long
1916sys32_lseek (unsigned int fd, int offset, unsigned int whence)
1917{
1918        /* Sign-extension of "offset" is important here... */
1919        return sys_lseek(fd, offset, whence);
1920}
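
/*
 * Editor's note (worked example): "offset" is deliberately a signed int
 * so that negative 32-bit offsets widen correctly into the 64-bit file
 * offset.  With an unsigned parameter the same bit pattern would seek
 * ~4GB forward instead of one byte back:
 *
 *	(int)      0xffffffff -> (off_t)-1
 *	(unsigned) 0xffffffff -> (off_t)4294967295
 */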
1921
1922static int
1923groups16_to_user(short __user *grouplist, struct group_info *group_info)
1924{
1925        int i;
1926        short group;
1927
1928        for (i = 0; i < group_info->ngroups; i++) {
1929                group = (short)GROUP_AT(group_info, i);
1930                if (put_user(group, grouplist+i))
1931                        return -EFAULT;
1932        }
1933
1934        return 0;
1935}
1936
1937static int
1938groups16_from_user(struct group_info *group_info, short __user *grouplist)
1939{
1940        int i;
1941        short group;
1942
1943        for (i = 0; i < group_info->ngroups; i++) {
1944                if (get_user(group, grouplist+i))
1945                        return -EFAULT;
1946                GROUP_AT(group_info, i) = (gid_t)group;
1947        }
1948
1949        return 0;
1950}
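
/*
 * Editor's note (illustrative): these helpers shuttle the legacy 16-bit
 * group arrays one element at a time.  On the way out a gid is simply
 * truncated by the (short) cast, so ids above 65535 are not representable
 * through this interface:
 *
 *	(short) 1000  -> 1000
 *	(short) 70000 -> 4464	(70000 & 0xffff, silently wrong)
 */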
1951
1952asmlinkage long
1953sys32_getgroups16 (int gidsetsize, short __user *grouplist)
1954{
1955        int i;
1956
1957        if (gidsetsize < 0)
1958                return -EINVAL;
1959
1960        get_group_info(current->group_info);
1961        i = current->group_info->ngroups;
1962        if (gidsetsize) {
1963                if (i > gidsetsize) {
1964                        i = -EINVAL;
1965                        goto out;
1966                }
1967                if (groups16_to_user(grouplist, current->group_info)) {
1968                        i = -EFAULT;
1969                        goto out;
1970                }
1971        }
1972out:
1973        put_group_info(current->group_info);
1974        return i;
1975}
1976
1977asmlinkage long
1978sys32_setgroups16 (int gidsetsize, short __user *grouplist)
1979{
1980        struct group_info *group_info;
1981        int retval;
1982
1983        if (!capable(CAP_SETGID))
1984                return -EPERM;
1985        if ((unsigned)gidsetsize > NGROUPS_MAX)
1986                return -EINVAL;
1987
1988        group_info = groups_alloc(gidsetsize);
1989        if (!group_info)
1990                return -ENOMEM;
1991        retval = groups16_from_user(group_info, grouplist);
1992        if (retval) {
1993                put_group_info(group_info);
1994                return retval;
1995        }
1996
1997        retval = set_current_groups(group_info);
1998        put_group_info(group_info);
1999
2000        return retval;
2001}
2002
2003asmlinkage long
2004sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
2005{
2006        return sys_truncate(compat_ptr(path), ((unsigned long) len_hi << 32) | len_lo);
2007}
2008
2009asmlinkage long
2010sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi)
2011{
2012        return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo);
2013}
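
/*
 * Editor's note (illustrative arithmetic): 64-bit lengths arrive from
 * ia32 user space as two 32-bit halves and are glued back together as
 * hi << 32 | lo.  For example, a length of 0x123456789:
 *
 *	len_lo = 0x23456789
 *	len_hi = 0x00000001
 *	((unsigned long) len_hi << 32) | len_lo == 0x123456789UL
 */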
2014
2015static int
2016putstat64 (struct stat64 __user *ubuf, struct kstat *kbuf)
2017{
2018        int err;
2019        u64 hdev;
2020
2021        if (clear_user(ubuf, sizeof(*ubuf)))
2022                return -EFAULT;
2023
2024        hdev = huge_encode_dev(kbuf->dev);
2025        err  = __put_user(hdev, (u32 __user*)&ubuf->st_dev);
2026        err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_dev) + 1);
2027        err |= __put_user(kbuf->ino, &ubuf->__st_ino);
2028        err |= __put_user(kbuf->ino, &ubuf->st_ino_lo);
2029        err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi);
2030        err |= __put_user(kbuf->mode, &ubuf->st_mode);
2031        err |= __put_user(kbuf->nlink, &ubuf->st_nlink);
2032        err |= __put_user(kbuf->uid, &ubuf->st_uid);
2033        err |= __put_user(kbuf->gid, &ubuf->st_gid);
2034        hdev = huge_encode_dev(kbuf->rdev);
2035        err |= __put_user(hdev, (u32 __user*)&ubuf->st_rdev);
2036        err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_rdev) + 1);
2037        err |= __put_user(kbuf->size, &ubuf->st_size_lo);
2038        err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi);
2039        err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime);
2040        err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec);
2041        err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime);
2042        err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec);
2043        err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime);
2044        err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec);
2045        err |= __put_user(kbuf->blksize, &ubuf->st_blksize);
2046        err |= __put_user(kbuf->blocks, &ubuf->st_blocks);
2047        return err;
2048}
2049
2050asmlinkage long
2051sys32_stat64 (char __user *filename, struct stat64 __user *statbuf)
2052{
2053        struct kstat s;
2054        long ret = vfs_stat(filename, &s);
2055        if (!ret)
2056                ret = putstat64(statbuf, &s);
2057        return ret;
2058}
2059
2060asmlinkage long
2061sys32_lstat64 (char __user *filename, struct stat64 __user *statbuf)
2062{
2063        struct kstat s;
2064        long ret = vfs_lstat(filename, &s);
2065        if (!ret)
2066                ret = putstat64(statbuf, &s);
2067        return ret;
2068}
2069
2070asmlinkage long
2071sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf)
2072{
2073        struct kstat s;
2074        long ret = vfs_fstat(fd, &s);
2075        if (!ret)
2076                ret = putstat64(statbuf, &s);
2077        return ret;
2078}
2079
2080asmlinkage long
2081sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval)
2082{
2083        mm_segment_t old_fs = get_fs();
2084        struct timespec t;
2085        long ret;
2086
2087        set_fs(KERNEL_DS);
2088        ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
2089        set_fs(old_fs);
2090        if (put_compat_timespec(&t, interval))
2091                return -EFAULT;
2092        return ret;
2093}
2094
2095asmlinkage long
2096sys32_pread (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
2097{
2098        return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
2099}
2100
2101asmlinkage long
2102sys32_pwrite (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
2103{
2104        return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
2105}
2106
2107asmlinkage long
2108sys32_sendfile (int out_fd, int in_fd, int __user *offset, unsigned int count)
2109{
2110        mm_segment_t old_fs = get_fs();
2111        long ret;
2112        off_t of;
2113
2114        if (offset && get_user(of, offset))
2115                return -EFAULT;
2116
2117        set_fs(KERNEL_DS);
2118        ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *) &of : NULL, count);
2119        set_fs(old_fs);
2120
2121        if (offset && put_user(of, offset))
2122                return -EFAULT;
2123
2124        return ret;
2125}
2126
2127asmlinkage long
2128sys32_personality (unsigned int personality)
2129{
2130        long ret;
2131
2132        if (current->personality == PER_LINUX32 && personality == PER_LINUX)
2133                personality = PER_LINUX32;
2134        ret = sys_personality(personality);
2135        if (ret == PER_LINUX32)
2136                ret = PER_LINUX;
2137        return ret;
2138}
2139
2140asmlinkage unsigned long
2141sys32_brk (unsigned int brk)
2142{
2143        unsigned long ret, obrk;
2144        struct mm_struct *mm = current->mm;
2145
2146        obrk = mm->brk;
2147        ret = sys_brk(brk);
2148        if (ret < obrk)
2149                clear_user(compat_ptr(ret), PAGE_ALIGN(ret) - ret);
2150        return ret;
2151}
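
/*
 * Editor's note (rationale sketch): when an ia32 process shrinks its brk,
 * only whole native pages are unmapped, so the region between the new
 * break and the end of its (larger) ia64 page keeps its old contents.
 * The clear_user() above zeroes that tail so a later brk extension sees
 * zero-filled memory, e.g. with 16KB pages:
 *
 *	ret (new brk)   = 0x08049100
 *	PAGE_ALIGN(ret) = 0x0804c000
 *	bytes cleared   = 0x0804c000 - 0x08049100
 */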
2152
2153/* Structure for ia32 emulation on ia64 */
2154struct epoll_event32
2155{
2156        u32 events;
2157        u32 data[2];
2158};
2159
2160asmlinkage long
2161sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 __user *event)
2162{
2163        mm_segment_t old_fs = get_fs();
2164        struct epoll_event event64;
2165        int error;
2166        u32 data_halfword;
2167
2168        if (!access_ok(VERIFY_READ, event, sizeof(struct epoll_event32)))
2169                return -EFAULT;
2170
2171        __get_user(event64.events, &event->events);
2172        __get_user(data_halfword, &event->data[0]);
2173        event64.data = data_halfword;
2174        __get_user(data_halfword, &event->data[1]);
2175        event64.data |= (u64)data_halfword << 32;
2176
2177        set_fs(KERNEL_DS);
2178        error = sys_epoll_ctl(epfd, op, fd, (struct epoll_event __user *) &event64);
2179        set_fs(old_fs);
2180
2181        return error;
2182}
2183
2184asmlinkage long
2185sys32_epoll_wait(int epfd, struct epoll_event32 __user * events, int maxevents,
2186                 int timeout)
2187{
2188        struct epoll_event *events64 = NULL;
2189        mm_segment_t old_fs = get_fs();
2190        int numevents, size;
2191        int evt_idx;
2192        int do_free_pages = 0;
2193
2194        if (maxevents <= 0) {
2195                return -EINVAL;
2196        }
2197
2198        /* Verify that the area passed by the user is writeable */
2199        if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event32)))
2200                return -EFAULT;
2201
2202        /*
2203         * Allocate space for the intermediate copy.  If the space needed
2204         * is large enough to cause kmalloc to fail, then try again with
2205         * __get_free_pages.
2206         */
2207        size = maxevents * sizeof(struct epoll_event);
2208        events64 = kmalloc(size, GFP_KERNEL);
2209        if (events64 == NULL) {
2210                events64 = (struct epoll_event *)
2211                                __get_free_pages(GFP_KERNEL, get_order(size));
2212                if (events64 == NULL)
2213                        return -ENOMEM;
2214                do_free_pages = 1;
2215        }
2216
2217        /* Do the system call */
2218        set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem */
2219        numevents = sys_epoll_wait(epfd, (struct epoll_event __user *) events64,
2220                                   maxevents, timeout);
2221        set_fs(old_fs);
2222
2223        /* Don't modify userspace memory if we're returning an error */
2224        if (numevents > 0) {
2225                /* Translate the 64-bit structures back into the 32-bit
2226                   structures */
2227                for (evt_idx = 0; evt_idx < numevents; evt_idx++) {
2228                        __put_user(events64[evt_idx].events,
2229                                   &events[evt_idx].events);
2230                        __put_user((u32)events64[evt_idx].data,
2231                                   &events[evt_idx].data[0]);
2232                        __put_user((u32)(events64[evt_idx].data >> 32),
2233                                   &events[evt_idx].data[1]);
2234                }
2235        }
2236
2237        if (do_free_pages)
2238                free_pages((unsigned long) events64, get_order(size));
2239        else
2240                kfree(events64);
2241        return numevents;
2242}
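
/*
 * Editor's note (layout sketch): this shim exists because the two ABIs
 * disagree on struct epoll_event.  On ia32 it is 12 bytes with no
 * padding; the native ia64 layout aligns the 64-bit data field:
 *
 *	struct epoll_event32 { u32 events; u32 data[2]; }	12 bytes
 *	struct epoll_event   { u32 events; u64 data; }		16 bytes
 *
 * hence the element-by-element translation instead of one flat copy.
 */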
2243
2244/*
2245 * Get a yet unused TLS descriptor index.
2246 */
2247static int
2248get_free_idx (void)
2249{
2250        struct thread_struct *t = &current->thread;
2251        int idx;
2252
2253        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
2254                if (desc_empty(t->tls_array + idx))
2255                        return idx + GDT_ENTRY_TLS_MIN;
2256        return -ESRCH;
2257}
2258
2259static void set_tls_desc(struct task_struct *p, int idx,
2260                const struct ia32_user_desc *info, int n)
2261{
2262        struct thread_struct *t = &p->thread;
2263        struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
2264        int cpu;
2265
2266        /*
2267         * We must not get preempted while modifying the TLS.
2268         */
2269        cpu = get_cpu();
2270
2271        while (n-- > 0) {
2272                if (LDT_empty(info)) {
2273                        desc->a = 0;
2274                        desc->b = 0;
2275                } else {
2276                        desc->a = LDT_entry_a(info);
2277                        desc->b = LDT_entry_b(info);
2278                }
2279
2280                ++info;
2281                ++desc;
2282        }
2283
2284        if (t == &current->thread)
2285                load_TLS(t, cpu);
2286
2287        put_cpu();
2288}
2289
2290/*
2291 * Set a given TLS descriptor:
2292 */
2293asmlinkage int
2294sys32_set_thread_area (struct ia32_user_desc __user *u_info)
2295{
2296        struct ia32_user_desc info;
2297        int idx;
2298
2299        if (copy_from_user(&info, u_info, sizeof(info)))
2300                return -EFAULT;
2301        idx = info.entry_number;
2302
2303        /*
2304         * index -1 means the kernel should try to find and allocate an empty descriptor:
2305         */
2306        if (idx == -1) {
2307                idx = get_free_idx();
2308                if (idx < 0)
2309                        return idx;
2310                if (put_user(idx, &u_info->entry_number))
2311                        return -EFAULT;
2312        }
2313
2314        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
2315                return -EINVAL;
2316
2317        set_tls_desc(current, idx, &info, 1);
2318        return 0;
2319}
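
/*
 * Editor's example (hypothetical user-space sketch, not part of this
 * file): the common libc TLS setup passes entry_number = -1 and reads
 * back the slot the kernel chose, which is exactly the get_free_idx()
 * path above:
 *
 *	struct user_desc ud = {
 *		.entry_number   = -1,	(ask the kernel to pick a slot)
 *		.base_addr      = (unsigned int) tls_block,
 *		.limit          = 0xfffff,
 *		.seg_32bit      = 1,
 *		.limit_in_pages = 1,
 *		.useable        = 1,
 *	};
 *	syscall(SYS_set_thread_area, &ud);
 *
 * On success, ud.entry_number holds the allocated GDT index.
 */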
2320
2321/*
2322 * Get the current Thread-Local Storage area:
2323 */
2324
2325#define GET_BASE(desc) (                        \
2326        (((desc)->a >> 16) & 0x0000ffff) |      \
2327        (((desc)->b << 16) & 0x00ff0000) |      \
2328        ( (desc)->b        & 0xff000000)   )
2329
2330#define GET_LIMIT(desc) (                       \
2331        ((desc)->a & 0x0ffff) |                 \
2332         ((desc)->b & 0xf0000) )
2333
2334#define GET_32BIT(desc)         (((desc)->b >> 22) & 1)
2335#define GET_CONTENTS(desc)      (((desc)->b >> 10) & 3)
2336#define GET_WRITABLE(desc)      (((desc)->b >>  9) & 1)
2337#define GET_LIMIT_PAGES(desc)   (((desc)->b >> 23) & 1)
2338#define GET_PRESENT(desc)       (((desc)->b >> 15) & 1)
2339#define GET_USEABLE(desc)       (((desc)->b >> 20) & 1)
2340
2341static void fill_user_desc(struct ia32_user_desc *info, int idx,
2342                const struct desc_struct *desc)
2343{
2344        info->entry_number = idx;
2345        info->base_addr = GET_BASE(desc);
2346        info->limit = GET_LIMIT(desc);
2347        info->seg_32bit = GET_32BIT(desc);
2348        info->contents = GET_CONTENTS(desc);
2349        info->read_exec_only = !GET_WRITABLE(desc);
2350        info->limit_in_pages = GET_LIMIT_PAGES(desc);
2351        info->seg_not_present = !GET_PRESENT(desc);
2352        info->useable = GET_USEABLE(desc);
2353}
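
/*
 * Editor's note (worked example, assumed encoding): the GET_* macros
 * above pick the scattered x86 descriptor fields back out of the two
 * 32-bit words.  A flat, present, writable 4GB data segment based at 0
 * would decode as:
 *
 *	desc->a = 0x0000ffff, desc->b = 0x00cff300
 *	GET_BASE()    = 0x00000000	GET_LIMIT()       = 0xfffff
 *	GET_32BIT()   = 1		GET_LIMIT_PAGES() = 1
 *	GET_PRESENT() = 1		GET_WRITABLE()    = 1
 */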
2354
2355asmlinkage int
2356sys32_get_thread_area (struct ia32_user_desc __user *u_info)
2357{
2358        struct ia32_user_desc info;
2359        struct desc_struct *desc;
2360        int idx;
2361
2362        if (get_user(idx, &u_info->entry_number))
2363                return -EFAULT;
2364        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
2365                return -EINVAL;
2366
2367        desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
2368        fill_user_desc(&info, idx, desc);
2369
2370        if (copy_to_user(u_info, &info, sizeof(info)))
2371                return -EFAULT;
2372        return 0;
2373}
2374
2375struct regset_get {
2376        void *kbuf;
2377        void __user *ubuf;
2378};
2379
2380struct regset_set {
2381        const void *kbuf;
2382        const void __user *ubuf;
2383};
2384
2385struct regset_getset {
2386        struct task_struct *target;
2387        const struct user_regset *regset;
2388        union {
2389                struct regset_get get;
2390                struct regset_set set;
2391        } u;
2392        unsigned int pos;
2393        unsigned int count;
2394        int ret;
2395};
2396
2397static void getfpreg(struct task_struct *task, int regno, int *val)
2398{
2399        switch (regno / sizeof(int)) {
2400        case 0:
2401                *val = task->thread.fcr & 0xffff;
2402                break;
2403        case 1:
2404                *val = task->thread.fsr & 0xffff;
2405                break;
2406        case 2:
2407                *val = (task->thread.fsr>>16) & 0xffff;
2408                break;
2409        case 3:
2410                *val = task->thread.fir;
2411                break;
2412        case 4:
2413                *val = (task->thread.fir>>32) & 0xffff;
2414                break;
2415        case 5:
2416                *val = task->thread.fdr;
2417                break;
2418        case 6:
2419                *val = (task->thread.fdr >> 32) & 0xffff;
2420                break;
2421        }
2422}
2423
2424static void setfpreg(struct task_struct *task, int regno, int val)
2425{
2426        switch (regno / sizeof(int)) {
2427        case 0:
2428                task->thread.fcr = (task->thread.fcr & (~0x1f3f))
2429                        | (val & 0x1f3f);
2430                break;
2431        case 1:
2432                task->thread.fsr = (task->thread.fsr & (~0xffff)) | val;
2433                break;
2434        case 2:
2435                task->thread.fsr = (task->thread.fsr & (~0xffff0000))
2436                        | (val << 16);
2437                break;
2438        case 3:
2439                task->thread.fir = (task->thread.fir & (~0xffffffff)) | val;
2440                break;
2441        case 5:
2442                task->thread.fdr = (task->thread.fdr & (~0xffffffff)) | val;
2443                break;
2444        }
2445}
2446
2447static void access_fpreg_ia32(int regno, void *reg,
2448                struct pt_regs *pt, struct switch_stack *sw,
2449                int tos, int write)
2450{
2451        void *f;
2452
2453        if ((regno += tos) >= 8)
2454                regno -= 8;
2455        if (regno < 4)
2456                f = &pt->f8 + regno;
2457        else if (regno <= 7)
2458                f = &sw->f12 + (regno - 4);
2459        else {
2460                printk(KERN_ERR "access_fpreg_ia32: regno must be less than 8\n");
2461                return;
2462        }
2463
2464        if (write)
2465                memcpy(f, reg, sizeof(struct _fpreg_ia32));
2466        else
2467                memcpy(reg, f, sizeof(struct _fpreg_ia32));
2468}
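
/*
 * Editor's note (mapping sketch): ia32 st(i) is top-of-stack relative
 * while the ia64 save areas are fixed, hence the rotation above:
 *
 *	physical = (i + tos) % 8
 *	physical 0..3 -> pt_regs       f8..f11
 *	physical 4..7 -> switch_stack  f12..f15
 *
 * e.g. with tos = 6, st(3) is physical register 1, i.e. pt->f8 + 1.
 */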
2469
2470static void do_fpregs_get(struct unw_frame_info *info, void *arg)
2471{
2472        struct regset_getset *dst = arg;
2473        struct task_struct *task = dst->target;
2474        struct pt_regs *pt;
2475        int start, end, tos;
2476        char buf[80];
2477
2478        if (dst->count == 0 || unw_unwind_to_user(info) < 0)
2479                return;
2480        if (dst->pos < 7 * sizeof(int)) {
2481                end = min((dst->pos + dst->count),
2482                        (unsigned int)(7 * sizeof(int)));
2483                for (start = dst->pos; start < end; start += sizeof(int))
2484                        getfpreg(task, start, (int *)(buf + start));
2485                dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2486                                &dst->u.get.kbuf, &dst->u.get.ubuf, buf,
2487                                0, 7 * sizeof(int));
2488                if (dst->ret || dst->count == 0)
2489                        return;
2490        }
2491        if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
2492                pt = task_pt_regs(task);
2493                tos = (task->thread.fsr >> 11) & 7;
2494                end = min(dst->pos + dst->count,
2495                        (unsigned int)(sizeof(struct ia32_user_i387_struct)));
2496                start = (dst->pos - 7 * sizeof(int)) /
2497                        sizeof(struct _fpreg_ia32);
2498                end = (end - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
2499                for (; start < end; start++)
2500                        access_fpreg_ia32(start,
2501                                (struct _fpreg_ia32 *)buf + start,
2502                                pt, info->sw, tos, 0);
2503                dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2504                                &dst->u.get.kbuf, &dst->u.get.ubuf,
2505                                buf, 7 * sizeof(int),
2506                                sizeof(struct ia32_user_i387_struct));
2507                if (dst->ret || dst->count == 0)
2508                        return;
2509        }
2510}
2511
2512static void do_fpregs_set(struct unw_frame_info *info, void *arg)
2513{
2514        struct regset_getset *dst = arg;
2515        struct task_struct *task = dst->target;
2516        struct pt_regs *pt;
2517        char buf[80];
2518        int end, start, tos;
2519
2520        if (dst->count == 0 || unw_unwind_to_user(info) < 0)
2521                return;
2522
2523        if (dst->pos < 7 * sizeof(int)) {
2524                start = dst->pos;
2525                dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2526                                &dst->u.set.kbuf, &dst->u.set.ubuf, buf,
2527                                0, 7 * sizeof(int));
2528                if (dst->ret)
2529                        return;
2530                for (; start < dst->pos; start += sizeof(int))
2531                        setfpreg(task, start, *((int *)(buf + start)));
2532                if (dst->count == 0)
2533                        return;
2534        }
2535        if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
2536                start = (dst->pos - 7 * sizeof(int)) /
2537                        sizeof(struct _fpreg_ia32);
2538                dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2539                                &dst->u.set.kbuf, &dst->u.set.ubuf,
2540                                buf, 7 * sizeof(int),
2541                                sizeof(struct ia32_user_i387_struct));
2542                if (dst->ret)
2543                        return;
2544                pt = task_pt_regs(task);
2545                tos = (task->thread.fsr >> 11) & 7;
2546                end = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
2547                for (; start < end; start++)
2548                        access_fpreg_ia32(start,
2549                                (struct _fpreg_ia32 *)buf + start,
2550                                pt, info->sw, tos, 1);
2551                if (dst->count == 0)
2552                        return;
2553        }
2554}
2555
2556#define OFFSET(member) ((int)(offsetof(struct ia32_user_fxsr_struct, member)))
2557static void getfpxreg(struct task_struct *task, int start, int end, char *buf)
2558{
2559        int min_val;
2560
2561        min_val = min(end, OFFSET(fop));
2562        while (start < min_val) {
2563                if (start == OFFSET(cwd))
2564                        *((short *)buf) = task->thread.fcr & 0xffff;
2565                else if (start == OFFSET(swd))
2566                        *((short *)buf) = task->thread.fsr & 0xffff;
2567                else if (start == OFFSET(twd))
2568                        *((short *)buf) = (task->thread.fsr>>16) & 0xffff;
2569                buf += 2;
2570                start += 2;
2571        }
2572        /* skip fop element */
2573        if (start == OFFSET(fop)) {
2574                start += 2;
2575                buf += 2;
2576        }
2577        while (start < end) {
2578                if (start == OFFSET(fip))
2579                        *((int *)buf) = task->thread.fir;
2580                else if (start == OFFSET(fcs))
2581                        *((int *)buf) = (task->thread.fir>>32) & 0xffff;
2582                else if (start == OFFSET(foo))
2583                        *((int *)buf) = task->thread.fdr;
2584                else if (start == OFFSET(fos))
2585                        *((int *)buf) = (task->thread.fdr>>32) & 0xffff;
2586                else if (start == OFFSET(mxcsr))
2587                        *((int *)buf) = ((task->thread.fcr>>32) & 0xff80)
2588                                         | ((task->thread.fsr>>32) & 0x3f);
2589                buf += 4;
2590                start += 4;
2591        }
2592}
2593
2594static void setfpxreg(struct task_struct *task, int start, int end, char *buf)
2595{
2596        int min_val, num32;
2597        short num;
2598        unsigned long num64;
2599
2600        min_val = min(end, OFFSET(fop));
2601        while (start < min_val) {
2602                num = *((short *)buf);
2603                if (start == OFFSET(cwd)) {
2604                        task->thread.fcr = (task->thread.fcr & (~0x1f3f))
2605                                                | (num & 0x1f3f);
2606                } else if (start == OFFSET(swd)) {
2607                        task->thread.fsr = (task->thread.fsr & (~0xffff)) | num;
2608                } else if (start == OFFSET(twd)) {
2609                        task->thread.fsr = (task->thread.fsr & (~0xffff0000))
2610                                | (((int)num) << 16);
2611                }
2612                buf += 2;
2613                start += 2;
2614        }
2615        /* skip fop element */
2616        if (start == OFFSET(fop)) {
2617                start += 2;
2618                buf += 2;
2619        }
2620        while (start < end) {
2621                num32 = *((int *)buf);
2622                if (start == OFFSET(fip))
2623                        task->thread.fir = (task->thread.fir & (~0xffffffff))
2624                                                 | num32;
2625                else if (start == OFFSET(foo))
2626                        task->thread.fdr = (task->thread.fdr & (~0xffffffff))
2627                                                 | num32;
2628                else if (start == OFFSET(mxcsr)) {
2629                        num64 = num32 & 0xff10;
2630                        task->thread.fcr = (task->thread.fcr &
2631                                (~0xff1000000000UL)) | (num64<<32);
2632                        num64 = num32 & 0x3f;
2633                        task->thread.fsr = (task->thread.fsr &
2634                                (~0x3f00000000UL)) | (num64<<32);
2635                }
2636                buf += 4;
2637                start += 4;
2638        }
2639}
2640
2641static void do_fpxregs_get(struct unw_frame_info *info, void *arg)
2642{
2643        struct regset_getset *dst = arg;
2644        struct task_struct *task = dst->target;
2645        struct pt_regs *pt;
2646        char buf[128];
2647        int start, end, tos;
2648
2649        if (dst->count == 0 || unw_unwind_to_user(info) < 0)
2650                return;
2651        if (dst->pos < OFFSET(st_space[0])) {
2652                end = min(dst->pos + dst->count, (unsigned int)OFFSET(st_space[0]));
2653                getfpxreg(task, dst->pos, end, buf);
2654                dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2655                                &dst->u.get.kbuf, &dst->u.get.ubuf, buf,
2656                                0, OFFSET(st_space[0]));
2657                if (dst->ret || dst->count == 0)
2658                        return;
2659        }
2660        if (dst->pos < OFFSET(xmm_space[0])) {
2661                pt = task_pt_regs(task);
2662                tos = (task->thread.fsr >> 11) & 7;
2663                end = min(dst->pos + dst->count,
2664                                (unsigned int)OFFSET(xmm_space[0]));
2665                start = (dst->pos - OFFSET(st_space[0])) / 16;
2666                end = (end - OFFSET(st_space[0])) / 16;
2667                for (; start < end; start++)
2668                        access_fpreg_ia32(start, buf + 16 * start, pt,
2669                                                info->sw, tos, 0);
2670                dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2671                                &dst->u.get.kbuf, &dst->u.get.ubuf,
2672                                buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
2673                if (dst->ret || dst->count == 0)
2674                        return;
2675        }
2676        if (dst->pos < OFFSET(padding[0]))
2677                dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2678                                &dst->u.get.kbuf, &dst->u.get.ubuf,
2679                                &info->sw->f16, OFFSET(xmm_space[0]),
2680                                OFFSET(padding[0]));
2681}
2682
2683static void do_fpxregs_set(struct unw_frame_info *info, void *arg)
2684{
2685        struct regset_getset *dst = arg;
2686        struct task_struct *task = dst->target;
2687        char buf[128];
2688        int start, end;
2689
2690        if (dst->count == 0 || unw_unwind_to_user(info) < 0)
2691                return;
2692
2693        if (dst->pos < OFFSET(st_space[0])) {
2694                start = dst->pos;
2695                dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2696                                &dst->u.set.kbuf, &dst->u.set.ubuf,
2697                                buf, 0, OFFSET(st_space[0]));
2698                if (dst->ret)
2699                        return;
2700                setfpxreg(task, start, dst->pos, buf);
2701                if (dst->count == 0)
2702                        return;
2703        }
2704        if (dst->pos < OFFSET(xmm_space[0])) {
2705                struct pt_regs *pt;
2706                int tos;
2707                pt = task_pt_regs(task);
2708                tos = (task->thread.fsr >> 11) & 7;
2709                start = (dst->pos - OFFSET(st_space[0])) / 16;
2710                dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2711                                &dst->u.set.kbuf, &dst->u.set.ubuf,
2712                                buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
2713                if (dst->ret)
2714                        return;
2715                end = (dst->pos - OFFSET(st_space[0])) / 16;
2716                for (; start < end; start++)
2717                        access_fpreg_ia32(start, buf + 16 * start, pt, info->sw,
2718                                                 tos, 1);
2719                if (dst->count == 0)
2720                        return;
2721        }
2722        if (dst->pos < OFFSET(padding[0]))
2723                dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2724                                &dst->u.set.kbuf, &dst->u.set.ubuf,
2725                                &info->sw->f16, OFFSET(xmm_space[0]),
2726                                 OFFSET(padding[0]));
2727}
2728#undef OFFSET
2729
2730static int do_regset_call(void (*call)(struct unw_frame_info *, void *),
2731                struct task_struct *target,
2732                const struct user_regset *regset,
2733                unsigned int pos, unsigned int count,
2734                const void *kbuf, const void __user *ubuf)
2735{
2736        struct regset_getset info = { .target = target, .regset = regset,
2737                .pos = pos, .count = count,
2738                .u.set = { .kbuf = kbuf, .ubuf = ubuf },
2739                .ret = 0 };
2740
2741        if (target == current)
2742                unw_init_running(call, &info);
2743        else {
2744                struct unw_frame_info ufi;
2745                memset(&ufi, 0, sizeof(ufi));
2746                unw_init_from_blocked_task(&ufi, target);
2747                (*call)(&ufi, &info);
2748        }
2749
2750        return info.ret;
2751}
2752
2753static int ia32_fpregs_get(struct task_struct *target,
2754                const struct user_regset *regset,
2755                unsigned int pos, unsigned int count,
2756                void *kbuf, void __user *ubuf)
2757{
2758        return do_regset_call(do_fpregs_get, target, regset, pos, count,
2759                kbuf, ubuf);
2760}
2761
2762static int ia32_fpregs_set(struct task_struct *target,
2763                const struct user_regset *regset,
2764                unsigned int pos, unsigned int count,
2765                const void *kbuf, const void __user *ubuf)
2766{
2767        return do_regset_call(do_fpregs_set, target, regset, pos, count,
2768                kbuf, ubuf);
2769}
2770
2771static int ia32_fpxregs_get(struct task_struct *target,
2772                const struct user_regset *regset,
2773                unsigned int pos, unsigned int count,
2774                void *kbuf, void __user *ubuf)
2775{
2776        return do_regset_call(do_fpxregs_get, target, regset, pos, count,
2777                kbuf, ubuf);
2778}
2779
2780static int ia32_fpxregs_set(struct task_struct *target,
2781                const struct user_regset *regset,
2782                unsigned int pos, unsigned int count,
2783                const void *kbuf, const void __user *ubuf)
2784{
2785        return do_regset_call(do_fpxregs_set, target, regset, pos, count,
2786                kbuf, ubuf);
2787}
2788
2789static int ia32_genregs_get(struct task_struct *target,
2790                const struct user_regset *regset,
2791                unsigned int pos, unsigned int count,
2792                void *kbuf, void __user *ubuf)
2793{
2794        if (kbuf) {
2795                u32 *kp = kbuf;
2796                while (count > 0) {
2797                        *kp++ = getreg(target, pos);
2798                        pos += 4;
2799                        count -= 4;
2800                }
2801        } else {
2802                u32 __user *up = ubuf;
2803                while (count > 0) {
2804                        if (__put_user(getreg(target, pos), up++))
2805                                return -EFAULT;
2806                        pos += 4;
2807                        count -= 4;
2808                }
2809        }
2810        return 0;
2811}
2812
2813static int ia32_genregs_set(struct task_struct *target,
2814                const struct user_regset *regset,
2815                unsigned int pos, unsigned int count,
2816                const void *kbuf, const void __user *ubuf)
2817{
2818        int ret = 0;
2819
2820        if (kbuf) {
2821                const u32 *kp = kbuf;
2822                while (!ret && count > 0) {
2823                        putreg(target, pos, *kp++);
2824                        pos += 4;
2825                        count -= 4;
2826                }
2827        } else {
2828                const u32 __user *up = ubuf;
2829                u32 val;
2830                while (!ret && count > 0) {
2831                        ret = __get_user(val, up++);
2832                        if (!ret)
2833                                putreg(target, pos, val);
2834                        pos += 4;
2835                        count -= 4;
2836                }
2837        }
2838        return ret;
2839}
2840
2841static int ia32_tls_active(struct task_struct *target,
2842                const struct user_regset *regset)
2843{
2844        struct thread_struct *t = &target->thread;
2845        int n = GDT_ENTRY_TLS_ENTRIES;
2846        while (n > 0 && desc_empty(&t->tls_array[n - 1]))
2847                --n;
2848        return n;
2849}
2850
2851static int ia32_tls_get(struct task_struct *target,
2852                const struct user_regset *regset, unsigned int pos,
2853                unsigned int count, void *kbuf, void __user *ubuf)
2854{
2855        const struct desc_struct *tls;
2856
2857        if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
2858                        (pos % sizeof(struct ia32_user_desc)) != 0 ||
2859                        (count % sizeof(struct ia32_user_desc)) != 0)
2860                return -EINVAL;
2861
2862        pos /= sizeof(struct ia32_user_desc);
2863        count /= sizeof(struct ia32_user_desc);
2864
2865        tls = &target->thread.tls_array[pos];
2866
2867        if (kbuf) {
2868                struct ia32_user_desc *info = kbuf;
2869                while (count-- > 0)
2870                        fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
2871                                        tls++);
2872        } else {
2873                struct ia32_user_desc __user *u_info = ubuf;
2874                while (count-- > 0) {
2875                        struct ia32_user_desc info;
2876                        fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
2877                        if (__copy_to_user(u_info++, &info, sizeof(info)))
2878                                return -EFAULT;
2879                }
2880        }
2881
2882        return 0;
2883}
2884
2885static int ia32_tls_set(struct task_struct *target,
2886                const struct user_regset *regset, unsigned int pos,
2887                unsigned int count, const void *kbuf, const void __user *ubuf)
2888{
2889        struct ia32_user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
2890        const struct ia32_user_desc *info;
2891
2892        if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
2893                        (pos % sizeof(struct ia32_user_desc)) != 0 ||
2894                        (count % sizeof(struct ia32_user_desc)) != 0)
2895                return -EINVAL;
2896
2897        if (kbuf)
2898                info = kbuf;
2899        else if (__copy_from_user(infobuf, ubuf, count))
2900                return -EFAULT;
2901        else
2902                info = infobuf;
2903
2904        set_tls_desc(target,
2905                GDT_ENTRY_TLS_MIN + (pos / sizeof(struct ia32_user_desc)),
2906                info, count / sizeof(struct ia32_user_desc));
2907
2908        return 0;
2909}
2910
2911/*
2912 * This should match arch/i386/kernel/ptrace.c:native_regsets.
2913 * XXX ioperm? vm86?
2914 */
2915static const struct user_regset ia32_regsets[] = {
2916        {
2917                .core_note_type = NT_PRSTATUS,
2918                .n = sizeof(struct user_regs_struct32)/4,
2919                .size = 4, .align = 4,
2920                .get = ia32_genregs_get, .set = ia32_genregs_set
2921        },
2922        {
2923                .core_note_type = NT_PRFPREG,
2924                .n = sizeof(struct ia32_user_i387_struct) / 4,
2925                .size = 4, .align = 4,
2926                .get = ia32_fpregs_get, .set = ia32_fpregs_set
2927        },
2928        {
2929                .core_note_type = NT_PRXFPREG,
2930                .n = sizeof(struct ia32_user_fxsr_struct) / 4,
2931                .size = 4, .align = 4,
2932                .get = ia32_fpxregs_get, .set = ia32_fpxregs_set
2933        },
2934        {
2935                .core_note_type = NT_386_TLS,
2936                .n = GDT_ENTRY_TLS_ENTRIES,
2937                .bias = GDT_ENTRY_TLS_MIN,
2938                .size = sizeof(struct ia32_user_desc),
2939                .align = sizeof(struct ia32_user_desc),
2940                .active = ia32_tls_active,
2941                .get = ia32_tls_get, .set = ia32_tls_set,
2942        },
2943};
2944
2945const struct user_regset_view user_ia32_view = {
2946        .name = "i386", .e_machine = EM_386,
2947        .regsets = ia32_regsets, .n = ARRAY_SIZE(ia32_regsets)
2948};
2949
2950long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
2951                        __u32 len_low, __u32 len_high, int advice)
2952{
2953        return sys_fadvise64_64(fd,
2954                               (((u64)offset_high) << 32) | offset_low,
2955                               (((u64)len_high) << 32) | len_low,
2956                               advice);
2957}
2958
2959#ifdef  NOTYET  /* UNTESTED FOR IA64 FROM HERE DOWN */
2960
2961asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid)
2962{
2963        uid_t sruid, seuid;
2964
2965        sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
2966        seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
2967        return sys_setreuid(sruid, seuid);
2968}
2969
2970asmlinkage long
2971sys32_setresuid(compat_uid_t ruid, compat_uid_t euid,
2972                compat_uid_t suid)
2973{
2974        uid_t sruid, seuid, ssuid;
2975
2976        sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
2977        seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
2978        ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid);
2979        return sys_setresuid(sruid, seuid, ssuid);
2980}
2981
2982asmlinkage long
2983sys32_setregid(compat_gid_t rgid, compat_gid_t egid)
2984{
2985        gid_t srgid, segid;
2986
2987        srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
2988        segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
2989        return sys_setregid(srgid, segid);
2990}
2991
2992asmlinkage long
2993sys32_setresgid(compat_gid_t rgid, compat_gid_t egid,
2994                compat_gid_t sgid)
2995{
2996        gid_t srgid, segid, ssgid;
2997
2998        srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
2999        segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
3000        ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid);
3001        return sys_setresgid(srgid, segid, ssgid);
3002}
3003#endif /* NOTYET */
3004