/* linux/kernel/resource.c */
   1/*
   2 *      linux/kernel/resource.c
   3 *
   4 * Copyright (C) 1999   Linus Torvalds
   5 * Copyright (C) 1999   Martin Mares <mj@ucw.cz>
   6 *
   7 * Arbitrary resource management.
   8 */
   9
  10#include <linux/module.h>
  11#include <linux/errno.h>
  12#include <linux/ioport.h>
  13#include <linux/init.h>
  14#include <linux/slab.h>
  15#include <linux/spinlock.h>
  16#include <linux/fs.h>
  17#include <linux/proc_fs.h>
  18#include <linux/seq_file.h>
  19#include <linux/device.h>
  20#include <linux/pfn.h>
  21#include <asm/io.h>
  22
  23
/* Root of the I/O port resource tree; /proc/ioports walks this. */
struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

/* Root of the memory-mapped I/O resource tree; /proc/iomem walks this. */
struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,	/* all-ones: covers the whole address space */
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* Protects both resource trees above. */
static DEFINE_RWLOCK(resource_lock);
  41
  42static void *r_next(struct seq_file *m, void *v, loff_t *pos)
  43{
  44        struct resource *p = v;
  45        (*pos)++;
  46        if (p->child)
  47                return p->child;
  48        while (!p->sibling && p->parent)
  49                p = p->parent;
  50        return p->sibling;
  51}
  52
  53#ifdef CONFIG_PROC_FS
  54
/* Deepest nesting level considered when indenting /proc output (see r_show). */
enum { MAX_IORES_LEVEL = 5 };
  56
  57static void *r_start(struct seq_file *m, loff_t *pos)
  58        __acquires(resource_lock)
  59{
  60        struct resource *p = m->private;
  61        loff_t l = 0;
  62        read_lock(&resource_lock);
  63        for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
  64                ;
  65        return p;
  66}
  67
/* seq_file .stop: drop the lock taken in r_start(). */
static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}
  73
/*
 * seq_file .show: print one resource as "<indent>start-end : name",
 * indented two spaces per level of depth below the root.
 */
static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	/* 4 hex digits for roots under 64K (I/O ports), 8 otherwise. */
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	/* Depth of r below root, capped at MAX_IORES_LEVEL. */
	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;
	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, (unsigned long long) r->start,
			width, (unsigned long long) r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}
  91
/* seq_file iterator over a resource tree (depth-first traversal). */
static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};
  98
  99static int ioports_open(struct inode *inode, struct file *file)
 100{
 101        int res = seq_open(file, &resource_op);
 102        if (!res) {
 103                struct seq_file *m = file->private_data;
 104                m->private = &ioport_resource;
 105        }
 106        return res;
 107}
 108
 109static int iomem_open(struct inode *inode, struct file *file)
 110{
 111        int res = seq_open(file, &resource_op);
 112        if (!res) {
 113                struct seq_file *m = file->private_data;
 114                m->private = &iomem_resource;
 115        }
 116        return res;
 117}
 118
/* File operations for /proc/ioports (seq_file plumbing). */
static const struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* File operations for /proc/iomem (seq_file plumbing). */
static const struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
 132
/* Register /proc/ioports and /proc/iomem at boot. */
static int __init ioresources_init(void)
{
	proc_create("ioports", 0, NULL, &proc_ioports_operations);
	proc_create("iomem", 0, NULL, &proc_iomem_operations);
	return 0;
}
__initcall(ioresources_init);
 140
 141#endif /* CONFIG_PROC_FS */
 142
/*
 * Return the conflict entry if you can't request it.
 *
 * Tries to link @new as a child of @root.  Returns NULL on success,
 * @root itself if @new does not fit inside @root, or the overlapping
 * child otherwise.  Caller holds resource_lock for writing.
 */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	/* Reject inverted ranges and ranges outside the root. */
	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	/* Children are kept sorted by start; find the insertion slot. */
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			/* Gap found: splice @new in before tmp. */
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;	/* tmp lies entirely before @new */
		return tmp;		/* overlap: report the conflict */
	}
}
 171
 172static int __release_resource(struct resource *old)
 173{
 174        struct resource *tmp, **p;
 175
 176        p = &old->parent->child;
 177        for (;;) {
 178                tmp = *p;
 179                if (!tmp)
 180                        break;
 181                if (tmp == old) {
 182                        *p = tmp->sibling;
 183                        old->parent = NULL;
 184                        return 0;
 185                }
 186                p = &tmp->sibling;
 187        }
 188        return -EINVAL;
 189}
 190
 191/**
 192 * request_resource - request and reserve an I/O or memory resource
 193 * @root: root resource descriptor
 194 * @new: resource descriptor desired by caller
 195 *
 196 * Returns 0 for success, negative error code on error.
 197 */
 198int request_resource(struct resource *root, struct resource *new)
 199{
 200        struct resource *conflict;
 201
 202        write_lock(&resource_lock);
 203        conflict = __request_resource(root, new);
 204        write_unlock(&resource_lock);
 205        return conflict ? -EBUSY : 0;
 206}
 207
 208EXPORT_SYMBOL(request_resource);
 209
 210/**
 211 * release_resource - release a previously reserved resource
 212 * @old: resource pointer
 213 */
 214int release_resource(struct resource *old)
 215{
 216        int retval;
 217
 218        write_lock(&resource_lock);
 219        retval = __release_resource(old);
 220        write_unlock(&resource_lock);
 221        return retval;
 222}
 223
 224EXPORT_SYMBOL(release_resource);
 225
 226#if defined(CONFIG_MEMORY_HOTPLUG) && !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
 * Finds the lowest memory resource that exists within [res->start, res->end).
 * The caller must specify res->start, res->end, and res->flags.
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
static int find_next_system_ram(struct resource *res)
{
	resource_size_t start, end;
	struct resource *p;

	BUG_ON(!res);

	start = res->start;
	end = res->end;
	BUG_ON(start >= end);

	read_lock(&resource_lock);
	/* Only top-level children of iomem_resource are examined. */
	for (p = iomem_resource.child; p ; p = p->sibling) {
		/* system ram is just marked as IORESOURCE_MEM */
		if (p->flags != res->flags)
			continue;
		if (p->start > end) {
			/* children are sorted by start: nothing further matches */
			p = NULL;
			break;
		}
		if ((p->end >= start) && (p->start < end))
			break;	/* overlaps the requested range */
	}
	read_unlock(&resource_lock);
	if (!p)
		return -1;
	/* copy data: clip the result to the intersection with *p */
	if (res->start < p->start)
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	return 0;
}
/*
 * Walk system RAM ranges covering [start_pfn, start_pfn + nr_pages) and
 * invoke @func(pfn, range_len_in_pages, arg) for each.  Stops early when
 * @func returns non-zero and returns that value; returns -1 if no range
 * was found at all.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
			int (*func)(unsigned long, unsigned long, void *))
{
	struct resource res;
	unsigned long pfn, len;
	u64 orig_end;
	int ret = -1;
	res.start = (u64) start_pfn << PAGE_SHIFT;
	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	orig_end = res.end;
	while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
		pfn = (unsigned long)(res.start >> PAGE_SHIFT);
		len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT);
		ret = (*func)(pfn, len, arg);
		if (ret)
			break;
		/* advance past the range just visited, restore the upper bound */
		res.start = res.end + 1;
		res.end = orig_end;
	}
	return ret;
}
 288
 289#endif
 290
/*
 * Find empty slot in the resource tree given range and alignment.
 * On success, writes the chosen range into new->start/new->end and
 * returns 0; returns -EBUSY if no gap fits.
 */
static int find_resource(struct resource *root, struct resource *new,
			 resource_size_t size, resource_size_t min,
			 resource_size_t max, resource_size_t align,
			 void (*alignf)(void *, struct resource *,
					resource_size_t, resource_size_t),
			 void *alignf_data)
{
	struct resource *this = root->child;

	new->start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to new->end below would cause an underflow.
	 */
	if (this && this->start == 0) {
		new->start = this->end + 1;
		this = this->sibling;
	}
	/* Try each gap between consecutive children, then after the last one. */
	for(;;) {
		if (this)
			new->end = this->start - 1;
		else
			new->end = root->end;
		/* Clamp candidate to [min, max], then apply alignment. */
		if (new->start < min)
			new->start = min;
		if (new->end > max)
			new->end = max;
		new->start = ALIGN(new->start, align);
		if (alignf)
			alignf(alignf_data, new, size, align);
		if (new->start < new->end && new->end - new->start >= size - 1) {
			new->end = new->start + size - 1;
			return 0;
		}
		if (!this)
			break;
		/* Gap too small: step past this child and try the next gap. */
		new->start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}
 335
 336/**
 337 * allocate_resource - allocate empty slot in the resource tree given range & alignment
 338 * @root: root resource descriptor
 339 * @new: resource descriptor desired by caller
 340 * @size: requested resource region size
 341 * @min: minimum size to allocate
 342 * @max: maximum size to allocate
 343 * @align: alignment requested, in bytes
 344 * @alignf: alignment function, optional, called if not NULL
 345 * @alignf_data: arbitrary data to pass to the @alignf function
 346 */
 347int allocate_resource(struct resource *root, struct resource *new,
 348                      resource_size_t size, resource_size_t min,
 349                      resource_size_t max, resource_size_t align,
 350                      void (*alignf)(void *, struct resource *,
 351                                     resource_size_t, resource_size_t),
 352                      void *alignf_data)
 353{
 354        int err;
 355
 356        write_lock(&resource_lock);
 357        err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
 358        if (err >= 0 && __request_resource(root, new))
 359                err = -EBUSY;
 360        write_unlock(&resource_lock);
 361        return err;
 362}
 363
 364EXPORT_SYMBOL(allocate_resource);
 365
/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 *
 * Unlike __request_resource(), conflicting resources that fit entirely
 * inside @new are re-parented underneath it instead of failing.
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	/* Descend while the conflict strictly contains @new. */
	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;	/* inserted cleanly, no conflict */

		if (first == parent)
			return first;	/* @new does not fit inside parent */

		/* Conflict doesn't fully contain @new, or duplicates it: stop. */
		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	/* Collect the run of siblings [first, next] that @new will swallow. */
	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	/* Splice @new into parent's child list in place of the run ... */
	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	/* ... and make the swallowed resources children of @new. */
	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}
 416
 417/**
 418 * insert_resource - Inserts a resource in the resource tree
 419 * @parent: parent of the new resource
 420 * @new: new resource to insert
 421 *
 422 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 423 *
 424 * This function is equivalent to request_resource when no conflict
 425 * happens. If a conflict happens, and the conflicting resources
 426 * entirely fit within the range of the new resource, then the new
 427 * resource is inserted and the conflicting resources become children of
 428 * the new resource.
 429 */
 430int insert_resource(struct resource *parent, struct resource *new)
 431{
 432        struct resource *conflict;
 433
 434        write_lock(&resource_lock);
 435        conflict = __insert_resource(parent, new);
 436        write_unlock(&resource_lock);
 437        return conflict ? -EBUSY : 0;
 438}
 439
 440/**
 441 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 442 * @root: root resource descriptor
 443 * @new: new resource to insert
 444 *
 445 * Insert a resource into the resource tree, possibly expanding it in order
 446 * to make it encompass any conflicting resources.
 447 */
 448void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
 449{
 450        if (new->parent)
 451                return;
 452
 453        write_lock(&resource_lock);
 454        for (;;) {
 455                struct resource *conflict;
 456
 457                conflict = __insert_resource(root, new);
 458                if (!conflict)
 459                        break;
 460                if (conflict == root)
 461                        break;
 462
 463                /* Ok, expand resource to cover the conflict, then try again .. */
 464                if (conflict->start < new->start)
 465                        new->start = conflict->start;
 466                if (conflict->end > new->end)
 467                        new->end = conflict->end;
 468
 469                printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
 470        }
 471        write_unlock(&resource_lock);
 472}
 473
/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	write_lock(&resource_lock);

	/* The new range must stay inside the parent. */
	if ((start < parent->start) || (end > parent->end))
		goto out;

	/* Every existing child must still fit inside the new range. */
	for (tmp = res->child; tmp; tmp = tmp->sibling) {
		if ((tmp->start < start) || (tmp->end > end))
			goto out;
	}

	/* Must not run into the following sibling ... */
	if (res->sibling && (res->sibling->start <= end))
		goto out;

	/* ... nor overlap the preceding sibling, if any. */
	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

	res->start = start;
	res->end = end;
	result = 0;

 out:
	write_unlock(&resource_lock);
	return result;
}
 519
/*
 * Reserve [start, end] under @root as a busy region, recursively splitting
 * around conflicting busy resources already in the tree.  Called with
 * resource_lock held for writing (hence GFP_ATOMIC).
 */
static void __init __reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = IORESOURCE_BUSY;

	for (;;) {
		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		if (conflict != parent) {
			/* Non-busy conflict: try nesting inside it. */
			parent = conflict;
			if (!(conflict->flags & IORESOURCE_BUSY))
				continue;
		}

		/* Uhhuh, that didn't work out.. */
		kfree(res);
		res = NULL;
		break;
	}

	if (!res) {
		/* failed, split and try again */

		/* conflict covered whole area */
		if (conflict->start <= start && conflict->end >= end)
			return;

		/* reserve the part before the conflict ... */
		if (conflict->start > start)
			__reserve_region_with_split(root, start, conflict->start-1, name);
		/* ... the overlap itself when the conflict isn't busy ... */
		if (!(conflict->flags & IORESOURCE_BUSY)) {
			resource_size_t common_start, common_end;

			common_start = max(conflict->start, start);
			common_end = min(conflict->end, end);
			if (common_start < common_end)
				__reserve_region_with_split(root, common_start, common_end, name);
		}
		/* ... and the part after it. */
		if (conflict->end < end)
			__reserve_region_with_split(root, conflict->end+1, end, name);
	}

}
 574
/* Locked wrapper around __reserve_region_with_split(). */
void __init reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	write_lock(&resource_lock);
	__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/* Exports adjust_resource(), defined further above. */
EXPORT_SYMBOL(adjust_resource);
 585
 586/**
 587 * resource_alignment - calculate resource's alignment
 588 * @res: resource pointer
 589 *
 590 * Returns alignment on success, 0 (invalid alignment) on failure.
 591 */
 592resource_size_t resource_alignment(struct resource *res)
 593{
 594        switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
 595        case IORESOURCE_SIZEALIGN:
 596                return resource_size(res);
 597        case IORESOURCE_STARTALIGN:
 598                return res->start;
 599        default:
 600                return 0;
 601        }
 602}
 603
 604/*
 605 * This is compatibility stuff for IO resources.
 606 *
 607 * Note how this, unlike the above, knows about
 608 * the IO flag meanings (busy etc).
 609 *
 610 * request_region creates a new busy region.
 611 *
 612 * check_region returns non-zero if the area is already busy.
 613 *
 614 * release_region releases a matching busy region.
 615 */
 616
/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 *
 * Returns the newly allocated resource on success, or NULL on allocation
 * failure or when the region conflicts with an existing busy resource.
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name)
{
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->name = name;
	res->start = start;
	res->end = start + n - 1;
	res->flags = IORESOURCE_BUSY;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *conflict;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		if (conflict != parent) {
			/* Non-busy conflict: nest inside it and retry. */
			parent = conflict;
			if (!(conflict->flags & IORESOURCE_BUSY))
				continue;
		}

		/* Uhhuh, that didn't work out.. */
		kfree(res);
		res = NULL;
		break;
	}
	write_unlock(&resource_lock);
	return res;
}
EXPORT_SYMBOL(__request_region);
 661
 662/**
 663 * __check_region - check if a resource region is busy or free
 664 * @parent: parent resource descriptor
 665 * @start: resource start address
 666 * @n: resource region size
 667 *
 668 * Returns 0 if the region is free at the moment it is checked,
 669 * returns %-EBUSY if the region is busy.
 670 *
 671 * NOTE:
 672 * This function is deprecated because its use is racy.
 673 * Even if it returns 0, a subsequent call to request_region()
 674 * may fail because another driver etc. just allocated the region.
 675 * Do NOT use it.  It will be removed from the kernel.
 676 */
 677int __check_region(struct resource *parent, resource_size_t start,
 678                        resource_size_t n)
 679{
 680        struct resource * res;
 681
 682        res = __request_region(parent, start, n, "check-region");
 683        if (!res)
 684                return -EBUSY;
 685
 686        release_resource(res);
 687        kfree(res);
 688        return 0;
 689}
 690EXPORT_SYMBOL(__check_region);
 691
/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
			resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				/* Not busy: the match must be nested deeper. */
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;	/* busy but not an exact match */
			/* Exact match: unlink and free it. */
			*p = res->sibling;
			write_unlock(&resource_lock);
			kfree(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);
 738
/*
 * Managed region resource
 *
 * Bookkeeping for a device-managed region: exactly the state needed to
 * issue the matching __release_region() when the owning device goes away.
 */
struct region_devres {
	struct resource *parent;	/* tree the region was requested from */
	resource_size_t start;
	resource_size_t n;		/* region length */
};
 747
/* devres destructor: release the region recorded in struct region_devres. */
static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}
 754
 755static int devm_region_match(struct device *dev, void *res, void *match_data)
 756{
 757        struct region_devres *this = res, *match = match_data;
 758
 759        return this->parent == match->parent &&
 760                this->start == match->start && this->n == match->n;
 761}
 762
/*
 * Managed counterpart of __request_region(): on success, the region is
 * released automatically via devm_region_release() when @dev is unbound.
 */
struct resource * __devm_request_region(struct device *dev,
				struct resource *parent, resource_size_t start,
				resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);	/* request failed: drop the devres too */

	return res;
}
EXPORT_SYMBOL(__devm_request_region);
 788
/*
 * Managed counterpart of __release_region(): releases the region and
 * destroys the matching devres entry (WARNs if no entry is found).
 */
void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);
 799
/*
 * Called from init/main.c to reserve IO ports.
 *
 * Parses the "reserve=" boot option as repeated "start,length" pairs and
 * marks up to MAXRESERVE of them busy so drivers keep off them.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		int io_start, io_num;
		int x = reserved;

		/* get_option() returns 2 when a comma follows the number. */
		if (get_option (&str, &io_start) != 2)
			break;
		if (get_option (&str, &io_num)   == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			/* Addresses >= 64K go into iomem, below that ioports. */
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);
 832
 833/*
 834 * Check if the requested addr and size spans more than any slot in the
 835 * iomem resource tree.
 836 */
 837int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
 838{
 839        struct resource *p = &iomem_resource;
 840        int err = 0;
 841        loff_t l;
 842
 843        read_lock(&resource_lock);
 844        for (p = p->child; p ; p = r_next(NULL, p, &l)) {
 845                /*
 846                 * We can probably skip the resources without
 847                 * IORESOURCE_IO attribute?
 848                 */
 849                if (p->start >= addr + size)
 850                        continue;
 851                if (p->end < addr)
 852                        continue;
 853                if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
 854                    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
 855                        continue;
 856                printk(KERN_WARNING "resource map sanity check conflict: "
 857                       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
 858                       (unsigned long long)addr,
 859                       (unsigned long long)(addr + size - 1),
 860                       (unsigned long long)p->start,
 861                       (unsigned long long)p->end,
 862                       p->name);
 863                err = -1;
 864                break;
 865        }
 866        read_unlock(&resource_lock);
 867
 868        return err;
 869}
 870