linux/drivers/xen/gntdev.c
/******************************************************************************
 * gntdev.c
 *
 * Device for accessing (in user-space) pages that have been granted by other
 * domains.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

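/*
 * Illustrative user-space usage (a minimal sketch; error handling is
 * omitted and remote_domid/gref stand for the peer domain id and the
 * grant reference it shared with us):
 *
 *        struct ioctl_gntdev_map_grant_ref op = {
 *                .count = 1,
 *                .refs[0] = { .domid = remote_domid, .ref = gref },
 *        };
 *        int fd = open("/dev/xen/gntdev", O_RDWR);
 *        ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
 *        void *p = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, op.index);
 *
 * The op.index returned by the ioctl is the byte offset that identifies
 * this grant range on later mmap() and IOCTL_GNTDEV_UNMAP_GRANT_REF calls.
 */
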
#undef DEBUG

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/balloon.h>
#include <xen/gntdev.h>
#include <xen/events.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
              "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");

static int limit = 1024*1024;
module_param(limit, int, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
                "the gntdev device");

static atomic_t pages_mapped = ATOMIC_INIT(0);

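/*
 * use_ptemod is true for PV guests, where grant mappings are performed by
 * rewriting the page table entries that cover the mapping
 * (GNTMAP_contains_pte); otherwise the granted frames are mapped directly
 * over the ballooned backing pages.
 */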
static int use_ptemod;
#define populate_freeable_maps use_ptemod

struct gntdev_priv {
        /* maps with visible offsets in the file descriptor */
        struct list_head maps;
        /* maps that are not visible; will be freed on munmap.
         * Only populated if populate_freeable_maps == 1 */
        struct list_head freeable_maps;
        /* lock protects maps and freeable_maps */
        spinlock_t lock;
        struct mm_struct *mm;
        struct mmu_notifier mn;
};

struct unmap_notify {
        int flags;
        /* Address relative to the start of the grant_map */
        int addr;
        int event;
};

struct grant_map {
        struct list_head next;
        struct vm_area_struct *vma;
        int index;
        int count;
        int flags;
        atomic_t users;
        struct unmap_notify notify;
        struct ioctl_gntdev_grant_ref *grants;
        struct gnttab_map_grant_ref   *map_ops;
        struct gnttab_unmap_grant_ref *unmap_ops;
        struct gnttab_map_grant_ref   *kmap_ops;
        struct page **pages;
};

static int unmap_grant_pages(struct grant_map *map, int offset, int pages);

/* ------------------------------------------------------------------ */

static void gntdev_print_maps(struct gntdev_priv *priv,
                              char *text, int text_index)
{
#ifdef DEBUG
        struct grant_map *map;

        pr_debug("%s: maps list (priv %p)\n", __func__, priv);
        list_for_each_entry(map, &priv->maps, next)
                pr_debug("  index %2d, count %2d %s\n",
                       map->index, map->count,
                       map->index == text_index && text ? text : "");
#endif
}

static void gntdev_free_map(struct grant_map *map)
{
        if (map == NULL)
                return;

        if (map->pages)
                free_xenballooned_pages(map->count, map->pages);
        kfree(map->pages);
        kfree(map->grants);
        kfree(map->map_ops);
        kfree(map->unmap_ops);
        kfree(map->kmap_ops);
        kfree(map);
}

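/*
 * Allocate a grant_map tracking @count grants: one map/unmap/kmap op and one
 * backing page per grant.  The backing pages come from the balloon driver so
 * the granted frames can be installed over them; all handles start out as -1
 * (not mapped).
 */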
static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
{
        struct grant_map *add;
        int i;

        add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
        if (NULL == add)
                return NULL;

        add->grants    = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
        add->map_ops   = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
        add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
        add->kmap_ops  = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
        add->pages     = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
        if (NULL == add->grants    ||
            NULL == add->map_ops   ||
            NULL == add->unmap_ops ||
            NULL == add->kmap_ops  ||
            NULL == add->pages)
                goto err;

        if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */))
                goto err;

        for (i = 0; i < count; i++) {
                add->map_ops[i].handle = -1;
                add->unmap_ops[i].handle = -1;
                add->kmap_ops[i].handle = -1;
        }

        add->index = 0;
        add->count = count;
        atomic_set(&add->users, 1);

        return add;

err:
        gntdev_free_map(add);
        return NULL;
}

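/*
 * Insert @add into priv->maps, which is kept sorted by index: the new map
 * goes into the first sufficiently large gap between existing maps, or after
 * the last map otherwise.  add->index becomes the pgoff at which this range
 * can later be mmap()ed.
 */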
static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
{
        struct grant_map *map;

        list_for_each_entry(map, &priv->maps, next) {
                if (add->index + add->count < map->index) {
                        list_add_tail(&add->next, &map->next);
                        goto done;
                }
                add->index = map->index + map->count;
        }
        list_add_tail(&add->next, &priv->maps);

done:
        gntdev_print_maps(priv, "[new]", add->index);
}

static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
                int index, int count)
{
        struct grant_map *map;

        list_for_each_entry(map, &priv->maps, next) {
                if (map->index != index)
                        continue;
                if (count && map->count != count)
                        continue;
                return map;
        }
        return NULL;
}

static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
{
        if (!map)
                return;

        if (!atomic_dec_and_test(&map->users))
                return;

        atomic_sub(map->count, &pages_mapped);

        if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
                notify_remote_via_evtchn(map->notify.event);
                evtchn_put(map->notify.event);
        }

        if (populate_freeable_maps && priv) {
                spin_lock(&priv->lock);
                list_del(&map->next);
                spin_unlock(&priv->lock);
        }

        if (map->pages && !use_ptemod)
                unmap_grant_pages(map, 0, map->count);
        gntdev_free_map(map);
}

/* ------------------------------------------------------------------ */

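/*
 * apply_to_page_range() callback used in the use_ptemod (PV) case: for each
 * user pte covering the VMA it records the machine address of that pte in
 * the corresponding map_op/unmap_op, so the hypervisor can write the grant
 * mapping directly into the page table (GNTMAP_contains_pte).
 */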
static int find_grant_ptes(pte_t *pte, pgtable_t token,
                unsigned long addr, void *data)
{
        struct grant_map *map = data;
        unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
        int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
        u64 pte_maddr;

        BUG_ON(pgnr >= map->count);
        pte_maddr = arbitrary_virt_to_machine(pte).maddr;

        gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
                          map->grants[pgnr].ref,
                          map->grants[pgnr].domid);
        gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
                            -1 /* handle */);
        return 0;
}

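/*
 * Issue the grant-map batch for every grant in the map.  In the !use_ptemod
 * case each grant is mapped at the kernel address of its ballooned page; in
 * the use_ptemod case the user ptes have already been prepared by
 * find_grant_ptes and an additional kmap_ops batch maps the grants into the
 * kernel linear mapping.  On success the returned handles are copied into
 * unmap_ops for later teardown.
 */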
static int map_grant_pages(struct grant_map *map)
{
        int i, err = 0;

        if (!use_ptemod) {
                /* Note: it could already be mapped */
                if (map->map_ops[0].handle != -1)
                        return 0;
                for (i = 0; i < map->count; i++) {
                        unsigned long addr = (unsigned long)
                                pfn_to_kaddr(page_to_pfn(map->pages[i]));
                        gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
                                map->grants[i].ref,
                                map->grants[i].domid);
                        gnttab_set_unmap_op(&map->unmap_ops[i], addr,
                                map->flags, -1 /* handle */);
                }
        } else {
                /*
                 * Set up the kmap_ops corresponding to the pte entries
                 * pointing to the kernel linear addresses of the struct
                 * pages.  These ptes are completely different from the
                 * user ptes dealt with by find_grant_ptes.
                 */
                for (i = 0; i < map->count; i++) {
                        unsigned level;
                        unsigned long address = (unsigned long)
                                pfn_to_kaddr(page_to_pfn(map->pages[i]));
                        pte_t *ptep;
                        u64 pte_maddr = 0;
                        BUG_ON(PageHighMem(map->pages[i]));

                        ptep = lookup_address(address, &level);
                        pte_maddr = arbitrary_virt_to_machine(ptep).maddr;
                        gnttab_set_map_op(&map->kmap_ops[i], pte_maddr,
                                map->flags |
                                GNTMAP_host_map |
                                GNTMAP_contains_pte,
                                map->grants[i].ref,
                                map->grants[i].domid);
                }
        }

        pr_debug("map %d+%d\n", map->index, map->count);
        err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
                        map->pages, map->count);
        if (err)
                return err;

        for (i = 0; i < map->count; i++) {
                if (map->map_ops[i].status)
                        err = -EINVAL;
                else {
                        BUG_ON(map->map_ops[i].handle == -1);
                        map->unmap_ops[i].handle = map->map_ops[i].handle;
                        pr_debug("map handle=%d\n", map->map_ops[i].handle);
                }
        }
        return err;
}

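/*
 * Unmap @pages grants starting at @offset.  If the UNMAP_NOTIFY_CLEAR_BYTE
 * notification falls inside this range, the byte is cleared before the
 * grants are torn down so the other end can observe the release.
 */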
static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
        int i, err = 0;

        if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
                int pgno = (map->notify.addr >> PAGE_SHIFT);
                if (pgno >= offset && pgno < offset + pages) {
                        /* No need for kmap, pages are in lowmem */
                        uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
                        tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
                        map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
                }
        }

        err = gnttab_unmap_refs(map->unmap_ops + offset,
                        use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
                        pages);
        if (err)
                return err;

        for (i = 0; i < pages; i++) {
                if (map->unmap_ops[offset+i].status)
                        err = -EINVAL;
                pr_debug("unmap handle=%d st=%d\n",
                        map->unmap_ops[offset+i].handle,
                        map->unmap_ops[offset+i].status);
                map->unmap_ops[offset+i].handle = -1;
        }
        return err;
}

static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
        int range, err = 0;

        pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);

        /* It is possible the requested range will have a "hole" where we
         * already unmapped some of the grants. Only unmap valid ranges.
         */
        while (pages && !err) {
                while (pages && map->unmap_ops[offset].handle == -1) {
                        offset++;
                        pages--;
                }
                range = 0;
                while (range < pages) {
                        if (map->unmap_ops[offset+range].handle == -1)
                                break;
                        range++;
                }
                err = __unmap_grant_pages(map, offset, range);
                offset += range;
                pages -= range;
        }

        return err;
}

/* ------------------------------------------------------------------ */

static void gntdev_vma_open(struct vm_area_struct *vma)
{
        struct grant_map *map = vma->vm_private_data;

        pr_debug("gntdev_vma_open %p\n", vma);
        atomic_inc(&map->users);
}

static void gntdev_vma_close(struct vm_area_struct *vma)
{
        struct grant_map *map = vma->vm_private_data;
        struct file *file = vma->vm_file;
        struct gntdev_priv *priv = file->private_data;

        pr_debug("gntdev_vma_close %p\n", vma);
        if (use_ptemod) {
                /* It is possible that an mmu notifier could be running
                 * concurrently, so take priv->lock to make sure the vma does
                 * not vanish from under its unmap_grant_pages call; we will
                 * spin here until that call completes. Such a concurrent call
                 * will not do any unmapping, since that has been done prior
                 * to closing the vma, but it may still iterate the unmap_ops
                 * list.
                 */
                spin_lock(&priv->lock);
                map->vma = NULL;
                spin_unlock(&priv->lock);
        }
        vma->vm_private_data = NULL;
        gntdev_put_map(priv, map);
}

static struct vm_operations_struct gntdev_vmops = {
        .open = gntdev_vma_open,
        .close = gntdev_vma_close,
};

/* ------------------------------------------------------------------ */

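/*
 * In the use_ptemod case the grant mappings live in the user page tables,
 * so the mmu notifier callbacks below must revoke them whenever the kernel
 * invalidates or tears down the corresponding part of the address space.
 */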
static void unmap_if_in_range(struct grant_map *map,
                              unsigned long start, unsigned long end)
{
        unsigned long mstart, mend;
        int err;

        if (!map->vma)
                return;
        if (map->vma->vm_start >= end)
                return;
        if (map->vma->vm_end <= start)
                return;
        mstart = max(start, map->vma->vm_start);
        mend   = min(end,   map->vma->vm_end);
        pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
                        map->index, map->count,
                        map->vma->vm_start, map->vma->vm_end,
                        start, end, mstart, mend);
        err = unmap_grant_pages(map,
                                (mstart - map->vma->vm_start) >> PAGE_SHIFT,
                                (mend - mstart) >> PAGE_SHIFT);
        WARN_ON(err);
}

static void mn_invl_range_start(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
        struct grant_map *map;

        spin_lock(&priv->lock);
        list_for_each_entry(map, &priv->maps, next) {
                unmap_if_in_range(map, start, end);
        }
        list_for_each_entry(map, &priv->freeable_maps, next) {
                unmap_if_in_range(map, start, end);
        }
        spin_unlock(&priv->lock);
}

static void mn_invl_page(struct mmu_notifier *mn,
                         struct mm_struct *mm,
                         unsigned long address)
{
        mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
}

static void mn_release(struct mmu_notifier *mn,
                       struct mm_struct *mm)
{
        struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
        struct grant_map *map;
        int err;

        spin_lock(&priv->lock);
        list_for_each_entry(map, &priv->maps, next) {
                if (!map->vma)
                        continue;
                pr_debug("map %d+%d (%lx %lx)\n",
                                map->index, map->count,
                                map->vma->vm_start, map->vma->vm_end);
                err = unmap_grant_pages(map, /* offset */ 0, map->count);
                WARN_ON(err);
        }
        list_for_each_entry(map, &priv->freeable_maps, next) {
                if (!map->vma)
                        continue;
                pr_debug("map %d+%d (%lx %lx)\n",
                                map->index, map->count,
                                map->vma->vm_start, map->vma->vm_end);
                err = unmap_grant_pages(map, /* offset */ 0, map->count);
                WARN_ON(err);
        }
        spin_unlock(&priv->lock);
}

static struct mmu_notifier_ops gntdev_mmu_ops = {
        .release                = mn_release,
        .invalidate_page        = mn_invl_page,
        .invalidate_range_start = mn_invl_range_start,
};

/* ------------------------------------------------------------------ */

static int gntdev_open(struct inode *inode, struct file *flip)
{
        struct gntdev_priv *priv;
        int ret = 0;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        INIT_LIST_HEAD(&priv->maps);
        INIT_LIST_HEAD(&priv->freeable_maps);
        spin_lock_init(&priv->lock);

        if (use_ptemod) {
                priv->mm = get_task_mm(current);
                if (!priv->mm) {
                        kfree(priv);
                        return -ENOMEM;
                }
                priv->mn.ops = &gntdev_mmu_ops;
                ret = mmu_notifier_register(&priv->mn, priv->mm);
                mmput(priv->mm);
        }

        if (ret) {
                kfree(priv);
                return ret;
        }

        flip->private_data = priv;
        pr_debug("priv %p\n", priv);

        return 0;
}

static int gntdev_release(struct inode *inode, struct file *flip)
{
        struct gntdev_priv *priv = flip->private_data;
        struct grant_map *map;

        pr_debug("priv %p\n", priv);

        while (!list_empty(&priv->maps)) {
                map = list_entry(priv->maps.next, struct grant_map, next);
                list_del(&map->next);
                gntdev_put_map(NULL /* already removed */, map);
        }
        WARN_ON(!list_empty(&priv->freeable_maps));

        if (use_ptemod)
                mmu_notifier_unregister(&priv->mn, priv->mm);
        kfree(priv);
        return 0;
}

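/*
 * IOCTL_GNTDEV_MAP_GRANT_REF: allocate a grant_map for op.count grant
 * references, charge it against the global "limit", copy the refs from user
 * space and return the byte offset (op.index) at which the caller should
 * mmap() the device to actually map the grants.
 */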
static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
                                       struct ioctl_gntdev_map_grant_ref __user *u)
{
        struct ioctl_gntdev_map_grant_ref op;
        struct grant_map *map;
        int err;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;
        pr_debug("priv %p, add %d\n", priv, op.count);
        if (unlikely(op.count <= 0))
                return -EINVAL;

        err = -ENOMEM;
        map = gntdev_alloc_map(priv, op.count);
        if (!map)
                return err;

        if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
                pr_debug("can't map: over limit\n");
                gntdev_put_map(NULL, map);
                return err;
        }

        if (copy_from_user(map->grants, &u->refs,
                           sizeof(map->grants[0]) * op.count) != 0) {
                gntdev_put_map(NULL, map);
                return -EFAULT;
        }

        spin_lock(&priv->lock);
        gntdev_add_map(priv, map);
        op.index = map->index << PAGE_SHIFT;
        spin_unlock(&priv->lock);

        if (copy_to_user(u, &op, sizeof(op)) != 0)
                return -EFAULT;

        return 0;
}

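/*
 * IOCTL_GNTDEV_UNMAP_GRANT_REF: drop the offset's map from the file.  The
 * grants themselves are only torn down when the last reference goes away,
 * which may happen later if the range is still mmap()ed.
 */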
static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
                                         struct ioctl_gntdev_unmap_grant_ref __user *u)
{
        struct ioctl_gntdev_unmap_grant_ref op;
        struct grant_map *map;
        int err = -ENOENT;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;
        pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

        spin_lock(&priv->lock);
        map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
        if (map) {
                list_del(&map->next);
                if (populate_freeable_maps)
                        list_add_tail(&map->next, &priv->freeable_maps);
                err = 0;
        }
        spin_unlock(&priv->lock);
        if (map)
                gntdev_put_map(priv, map);
        return err;
}

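/*
 * IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR: given a virtual address inside one of
 * our mappings, report the file offset and page count that were used to
 * create it, so the caller can later unmap it by offset.
 */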
static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
                                              struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
        struct ioctl_gntdev_get_offset_for_vaddr op;
        struct vm_area_struct *vma;
        struct grant_map *map;
        int rv = -EINVAL;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;
        pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, op.vaddr);
        if (!vma || vma->vm_ops != &gntdev_vmops)
                goto out_unlock;

        map = vma->vm_private_data;
        if (!map)
                goto out_unlock;

        op.offset = map->index << PAGE_SHIFT;
        op.count = map->count;
        rv = 0;

 out_unlock:
        up_read(&current->mm->mmap_sem);

        if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
                return -EFAULT;
        return rv;
}

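/*
 * IOCTL_GNTDEV_SET_UNMAP_NOTIFY: arrange for a byte to be cleared and/or an
 * event channel notification to be sent when the range containing op.index
 * is finally unmapped.
 */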
static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
        struct ioctl_gntdev_unmap_notify op;
        struct grant_map *map;
        int rc;
        int out_flags;
        unsigned int out_event;

        if (copy_from_user(&op, u, sizeof(op)))
                return -EFAULT;

        if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
                return -EINVAL;

        /* We need to grab a reference to the event channel we are going to use
         * to send the notify before releasing the reference we may already have
         * (if someone has called this ioctl twice). This is required so that
         * it is possible to change the clear_byte part of the notification
         * without disturbing the event channel part, which may now be the last
         * reference to that event channel.
         */
        if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
                if (evtchn_get(op.event_channel_port))
                        return -EINVAL;
        }

        out_flags = op.action;
        out_event = op.event_channel_port;

        spin_lock(&priv->lock);

        list_for_each_entry(map, &priv->maps, next) {
                uint64_t begin = map->index << PAGE_SHIFT;
                uint64_t end = (map->index + map->count) << PAGE_SHIFT;
                if (op.index >= begin && op.index < end)
                        goto found;
        }
        rc = -ENOENT;
        goto unlock_out;

 found:
        if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
                        (map->flags & GNTMAP_readonly)) {
                rc = -EINVAL;
                goto unlock_out;
        }

        out_flags = map->notify.flags;
        out_event = map->notify.event;

        map->notify.flags = op.action;
        map->notify.addr = op.index - (map->index << PAGE_SHIFT);
        map->notify.event = op.event_channel_port;

        rc = 0;

 unlock_out:
        spin_unlock(&priv->lock);

        /* Drop the reference to the event channel we did not save in the map */
        if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
                evtchn_put(out_event);

        return rc;
}

static long gntdev_ioctl(struct file *flip,
                         unsigned int cmd, unsigned long arg)
{
        struct gntdev_priv *priv = flip->private_data;
        void __user *ptr = (void __user *)arg;

        switch (cmd) {
        case IOCTL_GNTDEV_MAP_GRANT_REF:
                return gntdev_ioctl_map_grant_ref(priv, ptr);

        case IOCTL_GNTDEV_UNMAP_GRANT_REF:
                return gntdev_ioctl_unmap_grant_ref(priv, ptr);

        case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
                return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

        case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
                return gntdev_ioctl_notify(priv, ptr);

        default:
                pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
                return -ENOIOCTLCMD;
        }

        return 0;
}

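/*
 * mmap() of the device: look up the grant_map previously created for this
 * pgoff/count, install the gntdev vm_ops, and perform the actual grant
 * mapping: via pte rewriting (apply_to_page_range plus the grant-map batch)
 * when use_ptemod is set, or by mapping the grants and inserting the
 * resulting pages with vm_insert_page() otherwise.
 */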
static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
        struct gntdev_priv *priv = flip->private_data;
        int index = vma->vm_pgoff;
        int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        struct grant_map *map;
        int i, err = -EINVAL;

        if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        pr_debug("map %d+%d at %lx (pgoff %lx)\n",
                        index, count, vma->vm_start, vma->vm_pgoff);

        spin_lock(&priv->lock);
        map = gntdev_find_map_index(priv, index, count);
        if (!map)
                goto unlock_out;
        if (use_ptemod && map->vma)
                goto unlock_out;
        if (use_ptemod && priv->mm != vma->vm_mm) {
                printk(KERN_WARNING "Huh? Other mm?\n");
                goto unlock_out;
        }

        atomic_inc(&map->users);

        vma->vm_ops = &gntdev_vmops;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        if (use_ptemod)
                vma->vm_flags |= VM_DONTCOPY;

        vma->vm_private_data = map;

        if (use_ptemod)
                map->vma = vma;

        if (map->flags) {
                if ((vma->vm_flags & VM_WRITE) &&
                                (map->flags & GNTMAP_readonly))
                        goto out_unlock_put;
        } else {
                map->flags = GNTMAP_host_map;
                if (!(vma->vm_flags & VM_WRITE))
                        map->flags |= GNTMAP_readonly;
        }

        spin_unlock(&priv->lock);

        if (use_ptemod) {
                err = apply_to_page_range(vma->vm_mm, vma->vm_start,
                                          vma->vm_end - vma->vm_start,
                                          find_grant_ptes, map);
                if (err) {
                        printk(KERN_WARNING "find_grant_ptes() failure.\n");
                        goto out_put_map;
                }
        }

        err = map_grant_pages(map);
        if (err)
                goto out_put_map;

        if (!use_ptemod) {
                for (i = 0; i < count; i++) {
                        err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
                                map->pages[i]);
                        if (err)
                                goto out_put_map;
                }
        }

        return 0;

unlock_out:
        spin_unlock(&priv->lock);
        return err;

out_unlock_put:
        spin_unlock(&priv->lock);
out_put_map:
        if (use_ptemod)
                map->vma = NULL;
        gntdev_put_map(priv, map);
        return err;
}

static const struct file_operations gntdev_fops = {
        .owner = THIS_MODULE,
        .open = gntdev_open,
        .release = gntdev_release,
        .mmap = gntdev_mmap,
        .unlocked_ioctl = gntdev_ioctl
};

static struct miscdevice gntdev_miscdev = {
        .minor        = MISC_DYNAMIC_MINOR,
        .name         = "xen/gntdev",
        .fops         = &gntdev_fops,
};

/* ------------------------------------------------------------------ */

static int __init gntdev_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        use_ptemod = xen_pv_domain();

        err = misc_register(&gntdev_miscdev);
        if (err != 0) {
                printk(KERN_ERR "Could not register gntdev device\n");
                return err;
        }
        return 0;
}

static void __exit gntdev_exit(void)
{
        misc_deregister(&gntdev_miscdev);
}

module_init(gntdev_init);
module_exit(gntdev_exit);

/* ------------------------------------------------------------------ */