linux/drivers/pci/iova.c
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>

void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
	spin_lock_init(&iovad->iova_alloc_lock);
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->dma_32bit_pfn = pfn_32bit;
}
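
/*
 * Example (illustrative sketch, not part of this file): a caller such as an
 * IOMMU driver typically embeds a struct iova_domain in its own domain
 * structure and initializes it once, passing the highest pfn still reachable
 * through 32-bit DMA.  "domain" and IOVA_PFN() below are the caller's own
 * definitions, assumed here for illustration:
 *
 *	init_iova_domain(&domain->iovad, IOVA_PFN(DMA_BIT_MASK(32)));
 */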

/* Return the node to start the backwards search from: the cached node for
 * 32-bit allocations when it is valid, otherwise the right-most node in the
 * tree.  When the cached node is used, *limit_pfn is lowered to just below
 * the cached range so the search continues from there.
 */
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached32_node = rb_next(&free->node);
}

/* Computes the padding size required, to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}
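
/*
 * Worked example (for illustration only): with size already rounded up to
 * 8 pfns (order 3) and limit_pfn == 0x1005, pad_size = (0x1005 + 1) % 8 = 6.
 * The allocator below then places pfn_lo at 0x1005 - (8 + 6) + 1 = 0xff8,
 * which is naturally aligned on 8 pfns.
 */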

/* Allocation strategy: starting from the cached 32-bit node (or the
 * right-most node in the tree), walk the allocated ranges from high to low
 * pfn looking for the first gap that can hold "size" pfns (plus any
 * alignment padding) at or below limit_pfn.  The new iova is carved out of
 * the top of that gap and then linked into the rbtree.
 */
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
			struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert the new iova into the domain rbtree while still holding the
	 * rbtree lock, then rebalance the tree.
	 */
	{
		struct rb_node **entry, *parent = NULL;

		/* If we have 'prev', it's a valid place to start the
		   insertion. Otherwise, start from the root. */
		if (prev)
			entry = &prev;
		else
			entry = &iovad->rbroot.rb_node;

		/* Figure out where to put new node */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* this should not happen */
		}

		/* Add new node and rebalance tree. */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);
		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad - iova domain in question
 * @size - size of page frames to allocate
 * @limit_pfn - max limit address
 * @size_aligned - set if size_aligned address range is required
 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
 * searching downwards from limit_pfn rather than upwards from IOVA_START_PFN.
 * If the size_aligned flag is set then the allocated address iova->pfn_lo
 * will be naturally aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	unsigned long flags;
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size aligned is set then round the size to
	 * the next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
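
/*
 * Example use of alloc_iova() (illustrative sketch): map "nrpages" page
 * frames below the 32-bit boundary with a size-aligned iova.  "domain",
 * "nrpages", "dma_addr" and IOVA_PFN() are placeholders for the caller's own
 * definitions, not something defined in this file:
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(&domain->iovad, nrpages,
 *			  IOVA_PFN(DMA_BIT_MASK(32)), true);
 *	if (!iova)
 *		return NULL;
 *	dma_addr = (dma_addr_t)iova->pfn_lo << PAGE_SHIFT;
 */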

/**
 * find_iova - finds an iova for a given pfn
 * @iovad - iova domain in question.
 * @pfn - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* The lock is not held while the caller references
			 * this iova: by design only one thread can reference
			 * a particular iova, and the thread that called this
			 * function is also the one that calls __free_iova(),
			 * so there is no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
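
/*
 * Example (illustrative sketch): once a DMA mapping is torn down, the caller
 * releases the iova either by pfn lookup or, if it kept the pointer returned
 * by alloc_iova(), directly.  "domain", "dma_addr" and IOVA_PFN() are the
 * caller's own definitions, assumed here for illustration:
 *
 *	free_iova(&domain->iovad, IOVA_PFN(dma_addr));
 *
 * or, with the struct iova pointer still at hand:
 *
 *	__free_iova(&domain->iovad, iova);
 */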

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (!iova)
		return iova;

	iova->pfn_hi = pfn_hi;
	iova->pfn_lo = pfn_lo;
	iova_insert_rbtree(&iovad->rbroot, iova);
	return iova;
}

/* Grow the existing node downwards if the requested range starts below it,
 * and advance *pfn_lo past the node so that only the part of the range
 * above it remains to be inserted.
 */
static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that it is not handed out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	spin_lock(&iovad->iova_rbtree_lock);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
				break;
	}

	/* We are here either because this is the first reserved node
	 * or we need to insert the remaining non-overlapping address range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock(&iovad->iova_rbtree_lock);
	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
	return iova;
}
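
/*
 * Example (illustrative sketch): an IOMMU driver can keep hardware-owned
 * address windows, such as the IOAPIC MMIO range, out of the allocator by
 * reserving them up front.  "reserved_iova_list", IOAPIC_RANGE_START,
 * IOAPIC_RANGE_END and IOVA_PFN() are placeholders for the caller's own
 * definitions:
 *
 *	if (!reserve_iova(&reserved_iova_list,
 *			  IOVA_PFN(IOAPIC_RANGE_START),
 *			  IOVA_PFN(IOAPIC_RANGE_END)))
 *		printk(KERN_ERR "Reserve IOAPIC range failed\n");
 */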

/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iovas from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_alloc_lock, flags);
	spin_lock(&from->iova_rbtree_lock);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock(&from->iova_rbtree_lock);
	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
}
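
/*
 * Example (illustrative sketch): when a new domain is set up, globally
 * reserved ranges can be propagated into it so that per-domain allocations
 * never hand them out.  "reserved_iova_list" and "domain" are placeholders
 * for the caller's own definitions:
 *
 *	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
 */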