/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/semaphore.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t* crash_notes;

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
        .name  = "Crash kernel",
        .start = 0,
        .end   = 0,
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

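/*
 * Decide whether an oops in task @p should escalate into booting the
 * crash kernel.  A rough sketch of the intended use (inferred from the
 * callers rather than stated in this file): the architecture die()/oops
 * paths check this and, when it returns non-zero, invoke crash_kexec()
 * with the faulting registers.
 */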
int kexec_should_crash(struct task_struct *p)
{
        if (in_interrupt() || !p->pid || is_init(p) || panic_on_oops)
                return 1;
        return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_CODE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
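
/*
 * Illustrative sketch (layout reconstructed from the helpers below, not
 * normative): the "linked list of descriptor pages" mentioned above is
 * an array of kimage_entry_t words, each a page-aligned physical address
 * tagged with a low-bit flag.  A loaded image looks roughly like:
 *
 *      dest_addr   | IND_DESTINATION   set by kimage_set_destination()
 *      source_page | IND_SOURCE        added by kimage_add_page()
 *      source_page | IND_SOURCE        (destination advances a page each time)
 *      next_array  | IND_INDIRECTION   chains to the next page of entries
 *      ...
 *      IND_DONE                        written by kimage_terminate()
 *
 * for_each_kimage_entry() walks this structure, following
 * IND_INDIRECTION links until it hits IND_DONE.
 */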

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
                                       gfp_t gfp_mask,
                                       unsigned long dest);

static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
                            unsigned long nr_segments,
                            struct kexec_segment __user *segments)
{
        size_t segment_bytes;
        struct kimage *image;
        unsigned long i;
        int result;

        /* Allocate a controlling structure */
        result = -ENOMEM;
        image = kmalloc(sizeof(*image), GFP_KERNEL);
        if (!image)
                goto out;

        memset(image, 0, sizeof(*image));
        image->head = 0;
        image->entry = &image->head;
        image->last_entry = &image->head;
        image->control_page = ~0; /* By default this does not apply */
        image->start = entry;
        image->type = KEXEC_TYPE_DEFAULT;

        /* Initialize the list of control pages */
        INIT_LIST_HEAD(&image->control_pages);

        /* Initialize the list of destination pages */
        INIT_LIST_HEAD(&image->dest_pages);

        /* Initialize the list of unuseable pages */
        INIT_LIST_HEAD(&image->unuseable_pages);

        /* Read in the segments */
        image->nr_segments = nr_segments;
        segment_bytes = nr_segments * sizeof(*segments);
        result = copy_from_user(image->segment, segments, segment_bytes);
        if (result) {
                result = -EFAULT;
                goto out;
        }

        /*
         * Verify we have good destination addresses.  The caller is
         * responsible for making certain we don't attempt to load
         * the new image into invalid or reserved areas of RAM.  This
         * just verifies it is an address we can use.
         *
         * Since the kernel does everything in page size chunks ensure
         * the destination addresses are page aligned.  Too many
         * special cases crop up when we don't do this.  The most
         * insidious is getting overlapping destination addresses
         * simply because addresses are changed to page size
         * granularity.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                        goto out;
                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
                        goto out;
        }

        /* Verify our destination addresses do not overlap.
         * If we allowed overlapping destination addresses
         * through, very weird things can happen with no
         * easy explanation as one segment stomps on another.
         */
        result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                unsigned long j;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                for (j = 0; j < i; j++) {
                        unsigned long pstart, pend;
                        pstart = image->segment[j].mem;
                        pend   = pstart + image->segment[j].memsz;
                        /* Do the segments overlap ? */
                        if ((mend > pstart) && (mstart < pend))
                                goto out;
                }
        }

        /* Ensure our buffer sizes are strictly less than
         * our memory sizes.  This should always be the case,
         * and it is easier to check up front than to be surprised
         * later on.
         */
        result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                if (image->segment[i].bufsz > image->segment[i].memsz)
                        goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kfree(image);

        return result;
}

static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
                                unsigned long nr_segments,
                                struct kexec_segment __user *segments)
{
        int result;
        struct kimage *image;

        /* Allocate and initialize a controlling structure */
        image = NULL;
        result = do_kimage_alloc(&image, entry, nr_segments, segments);
        if (result)
                goto out;

        *rimage = image;

        /*
         * Find a location for the control code buffer, and add it to
         * the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        result = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_CODE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
                goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kfree(image);

        return result;
}

static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                                unsigned long nr_segments,
                                struct kexec_segment __user *segments)
{
        int result;
        struct kimage *image;
        unsigned long i;

        image = NULL;
        /* Verify we have a valid entry point */
        if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
                result = -EADDRNOTAVAIL;
                goto out;
        }

        /* Allocate and initialize a controlling structure */
        result = do_kimage_alloc(&image, entry, nr_segments, segments);
        if (result)
                goto out;

        /* Enable the special crash kernel control page
         * allocation policy.
         */
        image->control_page = crashk_res.start;
        image->type = KEXEC_TYPE_CRASH;

        /*
         * Verify we have good destination addresses.  Normally
         * the caller is responsible for making certain we don't
         * attempt to load the new image into invalid or reserved
         * areas of RAM.  But crash kernels are preloaded into a
         * reserved area of RAM.  We must ensure the addresses
         * are in the reserved area otherwise preloading the
         * kernel could corrupt things.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz - 1;
                /* Ensure we are within the crash kernel limits */
                if ((mstart < crashk_res.start) || (mend > crashk_res.end))
                        goto out;
        }

        /*
         * Find a location for the control code buffer, and add it to
         * the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        result = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_CODE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
                goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kfree(image);

        return result;
}

static int kimage_is_destination_range(struct kimage *image,
                                        unsigned long start,
                                        unsigned long end)
{
        unsigned long i;

        for (i = 0; i < image->nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                if ((end > mstart) && (start < mend))
                        return 1;
        }

        return 0;
}

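/*
 * Helper allocators: pages handed out by kimage_alloc_pages() are marked
 * PageReserved and remember their allocation order in page_private(), so
 * that kimage_free_pages() can later clear the reserved bits and return
 * the whole block to the page allocator.
 */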
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *pages;

        pages = alloc_pages(gfp_mask, order);
        if (pages) {
                unsigned int count, i;
                pages->mapping = NULL;
                set_page_private(pages, order);
                count = 1 << order;
                for (i = 0; i < count; i++)
                        SetPageReserved(pages + i);
        }

        return pages;
}

static void kimage_free_pages(struct page *page)
{
        unsigned int order, count, i;

        order = page_private(page);
        count = 1 << order;
        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
        struct list_head *pos, *next;

        list_for_each_safe(pos, next, list) {
                struct page *page;

                page = list_entry(pos, struct page, lru);
                list_del(&page->lru);
                kimage_free_pages(page);
        }
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                                                        unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * At worst this runs in O(N) of the image size.
         */
        struct list_head extra_pages;
        struct page *pages;
        unsigned int count;

        count = 1 << order;
        INIT_LIST_HEAD(&extra_pages);

        /* Loop while I can allocate a page and the page allocated
         * is a destination page.
         */
        do {
                unsigned long pfn, epfn, addr, eaddr;

                pages = kimage_alloc_pages(GFP_KERNEL, order);
                if (!pages)
                        break;
                pfn   = page_to_pfn(pages);
                epfn  = pfn + count;
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                              kimage_is_destination_range(image, addr, eaddr)) {
                        list_add(&pages->lru, &extra_pages);
                        pages = NULL;
                }
        } while (!pages);

        if (pages) {
                /* Remember the allocated page... */
                list_add(&pages->lru, &image->control_pages);

                /* Because the page is already in its destination
                 * location we will never allocate another page at
                 * that address.  Therefore kimage_alloc_pages
                 * will not return it (again) and we don't need
                 * to give it an entry in image->segment[].
                 */
        }
        /* Deal with the destination pages I have inadvertently allocated.
         *
         * Ideally I would convert multi-page allocations into single
         * page allocations, and add everything to image->dest_pages.
         *
         * For now it is simpler to just free the pages.
         */
        kimage_free_page_list(&extra_pages);

        return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                                                      unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * Control pages are also the only pages we must allocate
         * when loading a crash kernel.  All of the other pages
         * are specified by the segments and we just memcpy
         * into them directly.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * Given the low demand this implements a very simple
         * allocator that finds the first hole of the appropriate
         * size in the reserved memory region, and allocates all
         * of the memory up to and including the hole.
         */
        unsigned long hole_start, hole_end, size;
        struct page *pages;

        pages = NULL;
        size = (1 << order) << PAGE_SHIFT;
        hole_start = (image->control_page + (size - 1)) & ~(size - 1);
        hole_end   = hole_start + size - 1;
        while (hole_end <= crashk_res.end) {
                unsigned long i;

                if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
                        break;
                if (hole_end > crashk_res.end)
                        break;
                /* See if I overlap any of the segments */
                for (i = 0; i < image->nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend   = mstart + image->segment[i].memsz - 1;
                        if ((hole_end >= mstart) && (hole_start <= mend)) {
                                /* Advance the hole to the end of the segment */
                                hole_start = (mend + (size - 1)) & ~(size - 1);
                                hole_end   = hole_start + size - 1;
                                break;
                        }
                }
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
                        break;
                }
        }
        if (pages)
                image->control_page = hole_end;

        return pages;
}

struct page *kimage_alloc_control_pages(struct kimage *image,
                                         unsigned int order)
{
        struct page *pages = NULL;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                pages = kimage_alloc_normal_control_pages(image, order);
                break;
        case KEXEC_TYPE_CRASH:
                pages = kimage_alloc_crash_control_pages(image, order);
                break;
        }

        return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
        if (*image->entry != 0)
                image->entry++;

        if (image->entry == image->last_entry) {
                kimage_entry_t *ind_page;
                struct page *page;

                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
                if (!page)
                        return -ENOMEM;

                ind_page = page_address(page);
                *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
        }
        *image->entry = entry;
        image->entry++;
        *image->entry = 0;

        return 0;
}

static int kimage_set_destination(struct kimage *image,
                                   unsigned long destination)
{
        int result;

        destination &= PAGE_MASK;
        result = kimage_add_entry(image, destination | IND_DESTINATION);
        if (result == 0)
                image->destination = destination;

        return result;
}

static int kimage_add_page(struct kimage *image, unsigned long page)
{
        int result;

        page &= PAGE_MASK;
        result = kimage_add_entry(image, page | IND_SOURCE);
        if (result == 0)
                image->destination += PAGE_SIZE;

        return result;
}

static void kimage_free_extra_pages(struct kimage *image)
{
        /* Walk through and free any extra destination pages I may have */
        kimage_free_page_list(&image->dest_pages);

        /* Walk through and free any unuseable pages I have cached */
        kimage_free_page_list(&image->unuseable_pages);
}

static int kimage_terminate(struct kimage *image)
{
        if (*image->entry != 0)
                image->entry++;

        *image->entry = IND_DONE;

        return 0;
}

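/*
 * Walk every entry in an image's list, transparently following
 * IND_INDIRECTION links to the next page of entries and stopping once
 * the IND_DONE terminator is reached.
 */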
#define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
                ptr = (entry & IND_INDIRECTION) ? \
                        phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
        struct page *page;

        page = pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        kimage_entry_t ind = 0;

        if (!image)
                return;

        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
                        /* Free the previous indirection page */
                        if (ind & IND_INDIRECTION)
                                kimage_free_entry(ind);
                        /* Save this indirection page until we are
                         * done with it.
                         */
                        ind = entry;
                } else if (entry & IND_SOURCE)
                        kimage_free_entry(entry);
        }
        /* Free the final indirection page */
        if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);

        /* Handle any machine specific cleanup */
        machine_kexec_cleanup(image);

        /* Free the kexec control pages... */
        kimage_free_page_list(&image->control_pages);
        kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
                                        unsigned long page)
{
        kimage_entry_t *ptr, entry;
        unsigned long destination = 0;

        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        destination = entry & PAGE_MASK;
                else if (entry & IND_SOURCE) {
                        if (page == destination)
                                return ptr;
                        destination += PAGE_SIZE;
                }
        }

        return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
                                        gfp_t gfp_mask,
                                        unsigned long destination)
{
        /*
         * Here we implement safeguards to ensure that a source page
         * is not copied to its destination page before the data on
         * the destination page is no longer useful.
         *
         * To do this we maintain the invariant that a source page is
         * either its own destination page, or it is not a
         * destination page at all.
         *
         * That is slightly stronger than required, but the proof
         * that no problems will occur is trivial, and the
         * implementation is simple to verify.
         *
         * When allocating all pages normally this algorithm will run
         * in O(N) time, but in the worst case it will run in O(N^2)
         * time.  If the runtime is a problem the data structures can
         * be fixed.
         */
        struct page *page;
        unsigned long addr;

        /*
         * Walk through the list of destination pages, and see if I
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
                addr = page_to_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
                }
        }
        page = NULL;
        while (1) {
                kimage_entry_t *old;

                /* Allocate a page, if we run out of memory give up */
                page = kimage_alloc_pages(gfp_mask, 0);
                if (!page)
                        return NULL;
                /* If the page cannot be used, file it away */
                if (page_to_pfn(page) >
                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unuseable_pages);
                        continue;
                }
                addr = page_to_pfn(page) << PAGE_SHIFT;

                /* If it is the destination page we want, use it */
                if (addr == destination)
                        break;

                /* If the page is not a destination page use it */
                if (!kimage_is_destination_range(image, addr,
                                                  addr + PAGE_SIZE))
                        break;

                /*
                 * I know that the page is someone's destination page.
                 * See if there is already a source page for this
                 * destination page.  If so, swap the source pages.
                 */
                old = kimage_dst_used(image, addr);
                if (old) {
                        /* If so move it */
                        unsigned long old_addr;
                        struct page *old_page;

                        old_addr = *old & PAGE_MASK;
                        old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);

                        /* The old page I have found cannot be a
                         * destination page, so return it.
                         */
                        addr = old_addr;
                        page = old_page;
                        break;
                } else {
                        /* Place the page on the destination list; I
                         * will use it later.
                         */
                        list_add(&page->lru, &image->dest_pages);
                }
        }

        return page;
}

static int kimage_load_normal_segment(struct kimage *image,
                                         struct kexec_segment *segment)
{
        unsigned long maddr;
        unsigned long ubytes, mbytes;
        int result;
        unsigned char __user *buf;

        result = 0;
        buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;

        result = kimage_set_destination(image, maddr);
        if (result < 0)
                goto out;

        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                if (!page) {
                        result  = -ENOMEM;
                        goto out;
                }
                result = kimage_add_page(image, page_to_pfn(page)
                                                                << PAGE_SHIFT);
                if (result < 0)
                        goto out;

                ptr = kmap(page);
                /* Start with a clear page */
                memset(ptr, 0, PAGE_SIZE);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (mchunk > mbytes)
                        mchunk = mbytes;

                uchunk = mchunk;
                if (uchunk > ubytes)
                        uchunk = ubytes;

                result = copy_from_user(ptr, buf, uchunk);
                kunmap(page);
                if (result) {
                        result = (result < 0) ? result : -EIO;
                        goto out;
                }
                ubytes -= uchunk;
                maddr  += mchunk;
                buf    += mchunk;
                mbytes -= mchunk;
        }
out:
        return result;
}

static int kimage_load_crash_segment(struct kimage *image,
                                        struct kexec_segment *segment)
{
        /* For crash dump kernels we simply copy the data from
         * user space to its destination.
         * We do things a page at a time for the sake of kmap.
         */
        unsigned long maddr;
        unsigned long ubytes, mbytes;
        int result;
        unsigned char __user *buf;

        result = 0;
        buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;
        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = pfn_to_page(maddr >> PAGE_SHIFT);
                if (!page) {
                        result  = -ENOMEM;
                        goto out;
                }
                ptr = kmap(page);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (mchunk > mbytes)
                        mchunk = mbytes;

                uchunk = mchunk;
                if (uchunk > ubytes) {
                        uchunk = ubytes;
                        /* Zero the trailing part of the page */
                        memset(ptr + uchunk, 0, mchunk - uchunk);
                }
                result = copy_from_user(ptr, buf, uchunk);
                kunmap(page);
                if (result) {
                        result = (result < 0) ? result : -EIO;
                        goto out;
                }
                ubytes -= uchunk;
                maddr  += mchunk;
                buf    += mchunk;
                mbytes -= mchunk;
        }
out:
        return result;
}

static int kimage_load_segment(struct kimage *image,
                                struct kexec_segment *segment)
{
        int result = -ENOMEM;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                result = kimage_load_normal_segment(image, segment);
                break;
        case KEXEC_TYPE_CRASH:
                result = kimage_load_crash_segment(image, segment);
                break;
        }

        return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that copies the image to its final
 *   destination and jumps into the image at the entry point.
 *
 * kexec does not sync or unmount filesystems, so if you need
 * that to happen you must do it yourself.
 */
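
/*
 * Rough usage sketch (illustrative, not a normative ABI description):
 * a user-space loader running with CAP_SYS_BOOT prepares an array of
 * struct kexec_segment entries and issues the raw system call, e.g.
 *
 *      struct kexec_segment seg[2];    // each has buf/bufsz/mem/memsz
 *      // ... fill in page-aligned kernel and initrd segments ...
 *      syscall(__NR_kexec_load, entry, 2, seg, KEXEC_ARCH_DEFAULT);
 *
 * Passing KEXEC_ON_CRASH in flags loads into the crash-kernel slot
 * (kexec_crash_image) instead of the normal slot (kexec_image).
 */
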
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
/*
 * A home grown binary mutex.
 * Nothing can wait so this mutex is safe to use
 * in interrupt context :)
 */
static int kexec_lock;

asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
                                struct kexec_segment __user *segments,
                                unsigned long flags)
{
        struct kimage **dest_image, *image;
        int locked;
        int result;

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT))
                return -EPERM;

        /*
         * Verify we have a legal set of flags.
         * This leaves us room for future extensions.
         */
        if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
                return -EINVAL;

        /* Verify we are on the appropriate architecture */
        if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
                ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
                return -EINVAL;

        /* Put an artificial cap on the number
         * of segments passed to kexec_load.
         */
        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        image = NULL;
        result = 0;

        /* Because we write directly to the reserved memory
         * region when loading crash kernels we need a mutex here to
         * prevent multiple crash kernels from attempting to load
         * simultaneously, and to prevent a crash kernel from loading
         * over the top of an in-use crash kernel.
         *
         * KISS: always take the mutex.
         */
        locked = xchg(&kexec_lock, 1);
        if (locked)
                return -EBUSY;

        dest_image = &kexec_image;
        if (flags & KEXEC_ON_CRASH)
                dest_image = &kexec_crash_image;
        if (nr_segments > 0) {
                unsigned long i;

                /* Loading another kernel to reboot into */
                if ((flags & KEXEC_ON_CRASH) == 0)
                        result = kimage_normal_alloc(&image, entry,
                                                        nr_segments, segments);
                /* Loading another kernel to switch to if this one crashes */
                else if (flags & KEXEC_ON_CRASH) {
                        /* Free any current crash dump kernel before
                         * we corrupt it.
                         */
                        kimage_free(xchg(&kexec_crash_image, NULL));
                        result = kimage_crash_alloc(&image, entry,
                                                     nr_segments, segments);
                }
                if (result)
                        goto out;

                result = machine_kexec_prepare(image);
                if (result)
                        goto out;

                for (i = 0; i < nr_segments; i++) {
                        result = kimage_load_segment(image, &image->segment[i]);
                        if (result)
                                goto out;
                }
                result = kimage_terminate(image);
                if (result)
                        goto out;
        }
        /* Install the new kernel and uninstall the old */
        image = xchg(dest_image, image);

out:
        locked = xchg(&kexec_lock, 0); /* Release the mutex */
        BUG_ON(!locked);
        kimage_free(image);

        return result;
}

#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
                                unsigned long nr_segments,
                                struct compat_kexec_segment __user *segments,
                                unsigned long flags)
{
        struct compat_kexec_segment in;
        struct kexec_segment out, __user *ksegments;
        unsigned long i, result;

        /* Don't allow clients that don't understand the native
         * architecture to do anything.
         */
        if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
                return -EINVAL;

        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
        for (i = 0; i < nr_segments; i++) {
                result = copy_from_user(&in, &segments[i], sizeof(in));
                if (result)
                        return -EFAULT;

                out.buf   = compat_ptr(in.buf);
                out.bufsz = in.bufsz;
                out.mem   = in.mem;
                out.memsz = in.memsz;

                result = copy_to_user(&ksegments[i], &out, sizeof(out));
                if (result)
                        return -EFAULT;
        }

        return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

void crash_kexec(struct pt_regs *regs)
{
        int locked;

        /* Take the kexec_lock here to prevent sys_kexec_load
         * running on one cpu from replacing the crash kernel
         * we are using after a panic on a different cpu.
         *
         * If the crash kernel was not located in a fixed area
         * of memory the xchg(&kexec_crash_image) would be
         * sufficient.  But since I reuse the memory...
         */
        locked = xchg(&kexec_lock, 1);
        if (!locked) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;
                        crash_setup_regs(&fixed_regs, regs);
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(kexec_crash_image);
                }
                locked = xchg(&kexec_lock, 0);
                BUG_ON(!locked);
        }
}

static int __init crash_notes_memory_init(void)
{
        /* Allocate memory for saving cpu registers. */
        crash_notes = alloc_percpu(note_buf_t);
        if (!crash_notes) {
                printk(KERN_ERR "Kexec: Memory allocation for saving cpu register"
                       " states failed\n");
                return -ENOMEM;
        }
        return 0;
}
module_init(crash_notes_memory_init)