/* linux/drivers/xen/xencomm.c */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * Copyright (C) IBM Corp. 2006
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <xen/xencomm.h>
#include <xen/interface/xen.h>
#include <asm/xen/xencomm.h>	/* for xencomm_is_phys_contiguous() */

  28static int xencomm_init(struct xencomm_desc *desc,
  29                        void *buffer, unsigned long bytes)
  30{
  31        unsigned long recorded = 0;
  32        int i = 0;
  33
  34        while ((recorded < bytes) && (i < desc->nr_addrs)) {
  35                unsigned long vaddr = (unsigned long)buffer + recorded;
  36                unsigned long paddr;
  37                int offset;
  38                int chunksz;
  39
  40                offset = vaddr % PAGE_SIZE; /* handle partial pages */
  41                chunksz = min(PAGE_SIZE - offset, bytes - recorded);
  42
  43                paddr = xencomm_vtop(vaddr);
  44                if (paddr == ~0UL) {
  45                        printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n",
  46                               __func__, vaddr);
  47                        return -EINVAL;
  48                }
  49
  50                desc->address[i++] = paddr;
  51                recorded += chunksz;
  52        }
  53
  54        if (recorded < bytes) {
  55                printk(KERN_DEBUG
  56                       "%s: could only translate %ld of %ld bytes\n",
  57                       __func__, recorded, bytes);
  58                return -ENOSPC;
  59        }
  60
  61        /* mark remaining addresses invalid (just for safety) */
  62        while (i < desc->nr_addrs)
  63                desc->address[i++] = XENCOMM_INVALID;
  64
  65        desc->magic = XENCOMM_MAGIC;
  66
  67        return 0;
  68}
  69
  70static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
  71                                          void *buffer, unsigned long bytes)
  72{
  73        struct xencomm_desc *desc;
  74        unsigned long buffer_ulong = (unsigned long)buffer;
  75        unsigned long start = buffer_ulong & PAGE_MASK;
  76        unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
  77        unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
  78        unsigned long size = sizeof(*desc) +
  79                sizeof(desc->address[0]) * nr_addrs;
  80
  81        /*
  82         * slab allocator returns at least sizeof(void*) aligned pointer.
  83         * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
  84         * cross page boundary.
  85         */
  86        if (sizeof(*desc) > sizeof(void *)) {
  87                unsigned long order = get_order(size);
  88                desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
  89                                                               order);
  90                if (desc == NULL)
  91                        return NULL;
  92
  93                desc->nr_addrs =
  94                        ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
  95                        sizeof(*desc->address);
  96        } else {
  97                desc = kmalloc(size, gfp_mask);
  98                if (desc == NULL)
  99                        return NULL;
 100
 101                desc->nr_addrs = nr_addrs;
 102        }
 103        return desc;
 104}
 105
 106void xencomm_free(struct xencomm_handle *desc)
 107{
 108        if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
 109                struct xencomm_desc *desc__ = (struct xencomm_desc *)desc;
 110                if (sizeof(*desc__) > sizeof(void *)) {
 111                        unsigned long size = sizeof(*desc__) +
 112                                sizeof(desc__->address[0]) * desc__->nr_addrs;
 113                        unsigned long order = get_order(size);
 114                        free_pages((unsigned long)__va(desc), order);
 115                } else
 116                        kfree(__va(desc));
 117        }
 118}
 119
 120static int xencomm_create(void *buffer, unsigned long bytes,
 121                          struct xencomm_desc **ret, gfp_t gfp_mask)
 122{
 123        struct xencomm_desc *desc;
 124        int rc;
 125
 126        pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
 127
 128        if (bytes == 0) {
 129                /* don't create a descriptor; Xen recognizes NULL. */
 130                BUG_ON(buffer != NULL);
 131                *ret = NULL;
 132                return 0;
 133        }
 134
 135        BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
 136
 137        desc = xencomm_alloc(gfp_mask, buffer, bytes);
 138        if (!desc) {
 139                printk(KERN_DEBUG "%s failure\n", "xencomm_alloc");
 140                return -ENOMEM;
 141        }
 142
 143        rc = xencomm_init(desc, buffer, bytes);
 144        if (rc) {
 145                printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc);
 146                xencomm_free((struct xencomm_handle *)__pa(desc));
 147                return rc;
 148        }
 149
 150        *ret = desc;
 151        return 0;
 152}
 153
 154static struct xencomm_handle *xencomm_create_inline(void *ptr)
 155{
 156        unsigned long paddr;
 157
 158        BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));
 159
 160        paddr = (unsigned long)xencomm_pa(ptr);
 161        BUG_ON(paddr & XENCOMM_INLINE_FLAG);
 162        return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
 163}
 164
 165/* "mini" routine, for stack-based communications: */
 166static int xencomm_create_mini(void *buffer,
 167        unsigned long bytes, struct xencomm_mini *xc_desc,
 168        struct xencomm_desc **ret)
 169{
 170        int rc = 0;
 171        struct xencomm_desc *desc;
 172        BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
 173
 174        desc = (void *)xc_desc;
 175
 176        desc->nr_addrs = XENCOMM_MINI_ADDRS;
 177
 178        rc = xencomm_init(desc, buffer, bytes);
 179        if (!rc)
 180                *ret = desc;
 181
 182        return rc;
 183}
 184
 185struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
 186{
 187        int rc;
 188        struct xencomm_desc *desc;
 189
 190        if (xencomm_is_phys_contiguous((unsigned long)ptr))
 191                return xencomm_create_inline(ptr);
 192
 193        rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
 194
 195        if (rc || desc == NULL)
 196                return NULL;
 197
 198        return xencomm_pa(desc);
 199}
 200
 201struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
 202                        struct xencomm_mini *xc_desc)
 203{
 204        int rc;
 205        struct xencomm_desc *desc = NULL;
 206
 207        if (xencomm_is_phys_contiguous((unsigned long)ptr))
 208                return xencomm_create_inline(ptr);
 209
 210        rc = xencomm_create_mini(ptr, bytes, xc_desc,
 211                                &desc);
 212
 213        if (rc)
 214                return NULL;
 215
 216        return xencomm_pa(desc);
 217}
 218