// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec_core.c - kexec system call core code.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/btf.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/objtool.h>
#include <linux/kmsg_dump.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include "kexec_internal.h"

atomic_t __kexec_lock = ATOMIC_INIT(0);

/* Flag to indicate we are kexec'ing a new kernel during reboot. */
bool kexec_in_progress = false;

int kexec_should_crash(struct task_struct *p)
{
        /*
         * If crash_kexec_post_notifiers is enabled, don't run crash_kexec()
         * here; panic() will invoke it after the panic notifiers instead.
         */
        if (crash_kexec_post_notifiers)
                return 0;
        /*
         * Crash when the failure happens in interrupt context, in a kernel
         * thread, in the global init task, or when panic_on_oops is set.
         */
        if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
                return 1;
        return 0;
}

int kexec_crash_loaded(void)
{
        return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

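/*
 * Notes on the implementation of kimage allocation:
 *
 * Image segments are loaded into whatever pages the allocator hands out
 * and are only copied to their final destination addresses at kexec time.
 * Control pages are special: they hold the relocation code and the
 * indirection list used during that final copy, so they must never overlap
 * any segment destination range or memory the running kernel still needs.
 */
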
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static struct page *kimage_alloc_page(struct kimage *image,
                                       gfp_t gfp_mask,
                                       unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
        int i;
        unsigned long nr_segments = image->nr_segments;
        unsigned long total_pages = 0;
        unsigned long nr_pages = totalram_pages();

        /*
         * Verify we have good destination addresses.  The caller is
         * responsible for making certain we don't attempt to load the new
         * image into invalid or reserved areas of RAM; this only verifies
         * that each address is one we can use at all.
         *
         * Since the kernel does everything in page size chunks, ensure the
         * destination addresses are page aligned.  Too many special cases
         * crop up when we don't do this.
         */
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                if (mstart > mend)
                        return -EADDRNOTAVAIL;
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                        return -EADDRNOTAVAIL;
                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
                        return -EADDRNOTAVAIL;
        }

        /*
         * Verify our destination addresses do not overlap.  If we allowed
         * overlapping destination addresses through, very strange things
         * could happen as one segment stomps on another.
         */
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                unsigned long j;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                for (j = 0; j < i; j++) {
                        unsigned long pstart, pend;

                        pstart = image->segment[j].mem;
                        pend   = pstart + image->segment[j].memsz;
                        /* Do the segments overlap? */
                        if ((mend > pstart) && (mstart < pend))
                                return -EINVAL;
                }
        }

        /*
         * Ensure our buffer sizes are strictly less than or equal to our
         * memory sizes.  It is easier to check this up front than to be
         * surprised later on.
         */
        for (i = 0; i < nr_segments; i++) {
                if (image->segment[i].bufsz > image->segment[i].memsz)
                        return -EINVAL;
        }

        /*
         * Verify that no more than half of memory will be consumed.  If the
         * request from userspace is too large, a large amount of time will
         * be wasted allocating pages, which can cause a soft lockup.
         */
        for (i = 0; i < nr_segments; i++) {
                if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
                        return -EINVAL;

                total_pages += PAGE_COUNT(image->segment[i].memsz);
        }

        if (total_pages > nr_pages / 2)
                return -EINVAL;

        /*
         * Crash kernels are preloaded into a reserved area of RAM.  We must
         * ensure every segment lies within that reserved area, otherwise
         * preloading the new kernel could corrupt the running one.
         */
        if (image->type == KEXEC_TYPE_CRASH) {
                for (i = 0; i < nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend   = mstart + image->segment[i].memsz - 1;
                        /* Ensure we are within the crash kernel limits */
                        if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
                            (mend > phys_to_boot_phys(crashk_res.end)))
                                return -EADDRNOTAVAIL;
                }
        }

        return 0;
}

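/* Allocate a kimage and initialise its entry list and page lists. */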
struct kimage *do_kimage_alloc_init(void)
{
        struct kimage *image;

        /* Allocate a controlling structure */
        image = kzalloc(sizeof(*image), GFP_KERNEL);
        if (!image)
                return NULL;

        image->head = 0;
        image->entry = &image->head;
        image->last_entry = &image->head;
        image->control_page = ~0;
        image->type = KEXEC_TYPE_DEFAULT;

        /* Initialize the list of control pages */
        INIT_LIST_HEAD(&image->control_pages);

        /* Initialize the list of destination pages */
        INIT_LIST_HEAD(&image->dest_pages);

        /* Initialize the list of unusable pages */
        INIT_LIST_HEAD(&image->unusable_pages);

#ifdef CONFIG_CRASH_HOTPLUG
        image->hp_action = KEXEC_CRASH_HP_NONE;
        image->elfcorehdr_index = -1;
        image->elfcorehdr_updated = false;
#endif

        return image;
}

int kimage_is_destination_range(struct kimage *image,
                                        unsigned long start,
                                        unsigned long end)
{
        unsigned long i;

        for (i = 0; i < image->nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                if ((end > mstart) && (start < mend))
                        return 1;
        }

        return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *pages;

        if (fatal_signal_pending(current))
                return NULL;
        pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
        if (pages) {
                unsigned int count, i;

                pages->mapping = NULL;
                set_page_private(pages, order);
                count = 1 << order;
                for (i = 0; i < count; i++)
                        SetPageReserved(pages + i);

                arch_kexec_post_alloc_pages(page_address(pages), count,
                                            gfp_mask);

                if (gfp_mask & __GFP_ZERO)
                        for (i = 0; i < count; i++)
                                clear_highpage(pages + i);
        }

        return pages;
}

static void kimage_free_pages(struct page *page)
{
        unsigned int order, count, i;

        order = page_private(page);
        count = 1 << order;

        arch_kexec_pre_free_pages(page_address(page), count);

        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
        struct page *page, *next;

        list_for_each_entry_safe(page, next, list, lru) {
                list_del(&page->lru);
                kimage_free_pages(page);
        }
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                                                       unsigned int order)
{
        /*
         * Control pages are special: they are the intermediaries needed
         * while the rest of the pages are copied to their final resting
         * place, so they must not conflict with either the destination
         * addresses or memory the kernel is already using.
         *
         * At worst this runs in O(N) of the image size.
         */
        struct list_head extra_pages;
        struct page *pages;
        unsigned int count;

        count = 1 << order;
        INIT_LIST_HEAD(&extra_pages);

        /*
         * Loop while I can allocate a page and the page allocated
         * is a destination page.
         */
        do {
                unsigned long pfn, epfn, addr, eaddr;

                pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
                if (!pages)
                        break;
                pfn   = page_to_boot_pfn(pages);
                epfn  = pfn + count;
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                    kimage_is_destination_range(image, addr, eaddr)) {
                        list_add(&pages->lru, &extra_pages);
                        pages = NULL;
                }
        } while (!pages);

        if (pages) {
                /* Remember the allocated page... */
                list_add(&pages->lru, &image->control_pages);

                /*
                 * Because the page is already in its destination location
                 * we will never allocate another page at that address, so
                 * it does not need an entry in image->segment[].
                 */
        }
        /*
         * Deal with the destination pages we inadvertently allocated
         * above: for now it is simplest to just free them.
         */
        kimage_free_page_list(&extra_pages);

        return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                                                      unsigned int order)
{
        /*
         * Control pages must not conflict with either the destination
         * addresses or memory the kernel is already using.  They are also
         * the only pages that have to be allocated when loading a crash
         * kernel: everything else is copied straight into the reserved
         * segments.
         *
         * Given the low demand, this implements a very simple allocator
         * that finds the first hole of the appropriate size in the
         * reserved memory region.
         */
        unsigned long hole_start, hole_end, size;
        struct page *pages;

        pages = NULL;
        size = (1 << order) << PAGE_SHIFT;
        hole_start = (image->control_page + (size - 1)) & ~(size - 1);
        hole_end   = hole_start + size - 1;
        while (hole_end <= crashk_res.end) {
                unsigned long i;

                cond_resched();

                if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
                        break;
                /* See if the hole overlaps any of the segments */
                for (i = 0; i < image->nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend   = mstart + image->segment[i].memsz - 1;
                        if ((hole_end >= mstart) && (hole_start <= mend)) {
                                /* Advance the hole to the end of the segment */
                                hole_start = (mend + (size - 1)) & ~(size - 1);
                                hole_end   = hole_start + size - 1;
                                break;
                        }
                }
                /* If the hole overlaps no segments we have found our pages */
                if (i == image->nr_segments) {
                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
                        image->control_page = hole_end;
                        break;
                }
        }

        /* Ensure that these pages are decrypted if SME is enabled. */
        if (pages)
                arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

        return pages;
}

struct page *kimage_alloc_control_pages(struct kimage *image,
                                         unsigned int order)
{
        struct page *pages = NULL;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                pages = kimage_alloc_normal_control_pages(image, order);
                break;
        case KEXEC_TYPE_CRASH:
                pages = kimage_alloc_crash_control_pages(image, order);
                break;
        }

        return pages;
}

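/*
 * For kdump, keep one safe copy of the vmcoreinfo data inside the crash
 * reserved region.  The region is protected after loading, so the copy is
 * accessed through a vmap() alias whenever it has to be updated.
 */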
int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
        struct page *vmcoreinfo_page;
        void *safecopy;

        if (image->type != KEXEC_TYPE_CRASH)
                return 0;

        vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
        if (!vmcoreinfo_page) {
                pr_warn("Could not allocate vmcoreinfo buffer\n");
                return -ENOMEM;
        }
        safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
        if (!safecopy) {
                pr_warn("Could not vmap vmcoreinfo buffer\n");
                return -ENOMEM;
        }

        image->vmcoreinfo_data_copy = safecopy;
        crash_update_vmcoreinfo_safecopy(safecopy);

        return 0;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
        if (*image->entry != 0)
                image->entry++;

        if (image->entry == image->last_entry) {
                kimage_entry_t *ind_page;
                struct page *page;

                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
                if (!page)
                        return -ENOMEM;

                ind_page = page_address(page);
                *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
        }
        *image->entry = entry;
        image->entry++;
        *image->entry = 0;

        return 0;
}

static int kimage_set_destination(struct kimage *image,
                                   unsigned long destination)
{
        destination &= PAGE_MASK;

        return kimage_add_entry(image, destination | IND_DESTINATION);
}

static int kimage_add_page(struct kimage *image, unsigned long page)
{
        page &= PAGE_MASK;

        return kimage_add_entry(image, page | IND_SOURCE);
}

static void kimage_free_extra_pages(struct kimage *image)
{
        /* Walk through and free any extra destination pages I may have */
        kimage_free_page_list(&image->dest_pages);

        /* Walk through and free any unusable pages I have cached */
        kimage_free_page_list(&image->unusable_pages);
}

void kimage_terminate(struct kimage *image)
{
        if (*image->entry != 0)
                image->entry++;

        *image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
                ptr = (entry & IND_INDIRECTION) ? \
                        boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
        struct page *page;

        page = boot_pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
}

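/*
 * Free everything the image references: the source pages recorded in the
 * entry list, the indirection pages holding the list itself, the control
 * pages, and finally the kimage structure.
 */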
void kimage_free(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        kimage_entry_t ind = 0;

        if (!image)
                return;

        if (image->vmcoreinfo_data_copy) {
                crash_update_vmcoreinfo_safecopy(NULL);
                vunmap(image->vmcoreinfo_data_copy);
        }

        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
                        /* Free the previous indirection page */
                        if (ind & IND_INDIRECTION)
                                kimage_free_entry(ind);
                        /*
                         * Save this indirection page until we are
                         * done with it.
                         */
                        ind = entry;
                } else if (entry & IND_SOURCE)
                        kimage_free_entry(entry);
        }
        /* Free the final indirection page */
        if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);

        /* Handle any machine specific cleanup */
        machine_kexec_cleanup(image);

        /* Free the kexec control pages... */
        kimage_free_page_list(&image->control_pages);

        /*
         * Free up any temporary buffers allocated.  This might hit if an
         * error occurred much later after buffer allocation.
         */
        if (image->file_mode)
                kimage_file_post_load_cleanup(image);

        kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
                                        unsigned long page)
{
        kimage_entry_t *ptr, entry;
        unsigned long destination = 0;

        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        destination = entry & PAGE_MASK;
                else if (entry & IND_SOURCE) {
                        if (page == destination)
                                return ptr;
                        destination += PAGE_SIZE;
                }
        }

        return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
                                        gfp_t gfp_mask,
                                        unsigned long destination)
{
        /*
         * Here we implement safeguards to ensure that a source page is not
         * copied to its destination page before the data on the destination
         * page is no longer useful.
         *
         * To do this we maintain the invariant that a source page is either
         * its own destination page, or it is not a destination page at all.
         *
         * When allocating all pages normally this algorithm will run in
         * O(N) time, but in the worst case it will run in O(N^2) time.
         */
        struct page *page;
        unsigned long addr;

        /*
         * Walk through the list of destination pages, and see if we
         * already have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
                addr = page_to_boot_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
                }
        }
        page = NULL;
        while (1) {
                kimage_entry_t *old;

                /* Allocate a page; if we run out of memory give up */
                page = kimage_alloc_pages(gfp_mask, 0);
                if (!page)
                        return NULL;
                /* If the page cannot be used file it away */
                if (page_to_boot_pfn(page) >
                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unusable_pages);
                        continue;
                }
                addr = page_to_boot_pfn(page) << PAGE_SHIFT;

                /* If it is the destination page we want, use it */
                if (addr == destination)
                        break;

                /* If the page is not a destination page, use it */
                if (!kimage_is_destination_range(image, addr,
                                                  addr + PAGE_SIZE))
                        break;

                /*
                 * The page is someone else's destination page.
                 * See if there is already a source page for this
                 * destination page, and if so swap the source pages.
                 */
                old = kimage_dst_used(image, addr);
                if (old) {
                        /* If so move it */
                        unsigned long old_addr;
                        struct page *old_page;

                        old_addr = *old & PAGE_MASK;
                        old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);

                        /*
                         * The old page cannot be a destination page, so
                         * return it if its gfp flags honor the ones passed
                         * in; otherwise free it and retry.
                         */
                        if (!(gfp_mask & __GFP_HIGHMEM) &&
                            PageHighMem(old_page)) {
                                kimage_free_pages(old_page);
                                continue;
                        }
                        page = old_page;
                        break;
                }
                /* Place the page on the destination list, to be used later */
                list_add(&page->lru, &image->dest_pages);
        }

        return page;
}

static int kimage_load_normal_segment(struct kimage *image,
                                         struct kexec_segment *segment)
{
        unsigned long maddr;
        size_t ubytes, mbytes;
        int result;
        unsigned char __user *buf = NULL;
        unsigned char *kbuf = NULL;

        if (image->file_mode)
                kbuf = segment->kbuf;
        else
                buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;

        result = kimage_set_destination(image, maddr);
        if (result < 0)
                goto out;

        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                result = kimage_add_page(image, page_to_boot_pfn(page)
                                                                << PAGE_SHIFT);
                if (result < 0)
                        goto out;

                ptr = kmap_local_page(page);
                /* Start with a clear page */
                clear_page(ptr);
                ptr += maddr & ~PAGE_MASK;
                mchunk = min_t(size_t, mbytes,
                                PAGE_SIZE - (maddr & ~PAGE_MASK));
                uchunk = min(ubytes, mchunk);

                /* For file based kexec, source pages are in kernel memory */
                if (image->file_mode)
                        memcpy(ptr, kbuf, uchunk);
                else
                        result = copy_from_user(ptr, buf, uchunk);
                kunmap_local(ptr);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
                maddr  += mchunk;
                if (image->file_mode)
                        kbuf += mchunk;
                else
                        buf += mchunk;
                mbytes -= mchunk;

                cond_resched();
        }
out:
        return result;
}

static int kimage_load_crash_segment(struct kimage *image,
                                        struct kexec_segment *segment)
{
        /*
         * For crash dump kernels we simply copy the data from user space
         * straight to its destination.  We do things a page at a time for
         * the sake of kmap.
         */
        unsigned long maddr;
        size_t ubytes, mbytes;
        int result;
        unsigned char __user *buf = NULL;
        unsigned char *kbuf = NULL;

        result = 0;
        if (image->file_mode)
                kbuf = segment->kbuf;
        else
                buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;
        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                arch_kexec_post_alloc_pages(page_address(page), 1, 0);
                ptr = kmap_local_page(page);
                ptr += maddr & ~PAGE_MASK;
                mchunk = min_t(size_t, mbytes,
                                PAGE_SIZE - (maddr & ~PAGE_MASK));
                uchunk = min(ubytes, mchunk);
                if (mchunk > uchunk) {
                        /* Zero the trailing part of the page */
                        memset(ptr + uchunk, 0, mchunk - uchunk);
                }

                /* For file based kexec, source pages are in kernel memory */
                if (image->file_mode)
                        memcpy(ptr, kbuf, uchunk);
                else
                        result = copy_from_user(ptr, buf, uchunk);
                kexec_flush_icache_page(page);
                kunmap_local(ptr);
                arch_kexec_pre_free_pages(page_address(page), 1);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
                maddr  += mchunk;
                if (image->file_mode)
                        kbuf += mchunk;
                else
                        buf += mchunk;
                mbytes -= mchunk;

                cond_resched();
        }
out:
        return result;
}

int kimage_load_segment(struct kimage *image,
                                struct kexec_segment *segment)
{
        int result = -ENOMEM;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                result = kimage_load_normal_segment(image, segment);
                break;
        case KEXEC_TYPE_CRASH:
                result = kimage_load_crash_segment(image, segment);
                break;
        }

        return result;
}

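/*
 * Per image type (reboot vs. panic) count of how many more times a kexec
 * image may be loaded; -1 means unlimited.  Exposed through the
 * kexec_load_limit_{reboot,panic} sysctls below.
 */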
struct kexec_load_limit {
        /* Mutex protects the limit count */
        struct mutex mutex;
        int limit;
};

static struct kexec_load_limit load_limit_reboot = {
        .mutex = __MUTEX_INITIALIZER(load_limit_reboot.mutex),
        .limit = -1,
};

static struct kexec_load_limit load_limit_panic = {
        .mutex = __MUTEX_INITIALIZER(load_limit_panic.mutex),
        .limit = -1,
};

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
static int kexec_load_disabled;

#ifdef CONFIG_SYSCTL
static int kexec_limit_handler(struct ctl_table *table, int write,
                               void *buffer, size_t *lenp, loff_t *ppos)
{
        struct kexec_load_limit *limit = table->data;
        int val;
        struct ctl_table tmp = {
                .data = &val,
                .maxlen = sizeof(val),
                .mode = table->mode,
        };
        int ret;

        if (write) {
                ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
                if (ret)
                        return ret;

                if (val < 0)
                        return -EINVAL;

                mutex_lock(&limit->mutex);
                if (limit->limit != -1 && val >= limit->limit)
                        ret = -EINVAL;
                else
                        limit->limit = val;
                mutex_unlock(&limit->mutex);

                return ret;
        }

        mutex_lock(&limit->mutex);
        val = limit->limit;
        mutex_unlock(&limit->mutex);

        return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}

static struct ctl_table kexec_core_sysctls[] = {
        {
                .procname       = "kexec_load_disabled",
                .data           = &kexec_load_disabled,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                /* only handle a transition from default "0" to "1" */
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ONE,
                .extra2         = SYSCTL_ONE,
        },
        {
                .procname       = "kexec_load_limit_panic",
                .data           = &load_limit_panic,
                .mode           = 0644,
                .proc_handler   = kexec_limit_handler,
        },
        {
                .procname       = "kexec_load_limit_reboot",
                .data           = &load_limit_reboot,
                .mode           = 0644,
                .proc_handler   = kexec_limit_handler,
        },
        { }
};

static int __init kexec_core_sysctl_init(void)
{
        register_sysctl_init("kernel", kexec_core_sysctls);
        return 0;
}
late_initcall(kexec_core_sysctl_init);
#endif

bool kexec_load_permitted(int kexec_image_type)
{
        struct kexec_load_limit *limit;

        /*
         * Only the superuser can load a new kernel image, and only if the
         * facility has not been disabled via sysctl.
         */
        if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
                return false;

        /* Check the limit counter and decrease it. */
        limit = (kexec_image_type == KEXEC_TYPE_CRASH) ?
                &load_limit_panic : &load_limit_reboot;
        mutex_lock(&limit->mutex);
        if (!limit->limit) {
                mutex_unlock(&limit->mutex);
                return false;
        }
        if (limit->limit != -1)
                limit->limit--;
        mutex_unlock(&limit->mutex);

        return true;
}

/*
 * No panic_cpu check version of crash_kexec().  This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
        /*
         * Take the kexec lock here to prevent a kexec load running on one
         * CPU from replacing the crash kernel we are using after a panic
         * on a different CPU.
         */
        if (kexec_trylock()) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;

                        crash_setup_regs(&fixed_regs, regs);
                        crash_save_vmcoreinfo();
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(kexec_crash_image);
                }
                kexec_unlock();
        }
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

__bpf_kfunc void crash_kexec(struct pt_regs *regs)
{
        int old_cpu, this_cpu;

        /*
         * Only one CPU is allowed to execute the crash_kexec() code as with
         * panic().  Otherwise parallel calls of panic() and crash_kexec()
         * may stop each other.  To exclude them, we use panic_cpu here too.
         */
        this_cpu = raw_smp_processor_id();
        old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
        if (old_cpu == PANIC_CPU_INVALID) {
                /* This is the first CPU to get here, so go ahead. */
                __crash_kexec(regs);

                /*
                 * Reset panic_cpu to allow another panic()/crash_kexec()
                 * call.
                 */
                atomic_set(&panic_cpu, PANIC_CPU_INVALID);
        }
}

static inline resource_size_t crash_resource_size(const struct resource *res)
{
        return !res->end ? 0 : resource_size(res);
}

ssize_t crash_get_memory_size(void)
{
        ssize_t size = 0;

        if (!kexec_trylock())
                return -EBUSY;

        size += crash_resource_size(&crashk_res);
        size += crash_resource_size(&crashk_low_res);

        kexec_unlock();
        return size;
}

static int __crash_shrink_memory(struct resource *old_res,
                                 unsigned long new_size)
{
        struct resource *ram_res;

        ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
        if (!ram_res)
                return -ENOMEM;

        ram_res->start = old_res->start + new_size;
        ram_res->end   = old_res->end;
        ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
        ram_res->name  = "System RAM";

        if (!new_size) {
                release_resource(old_res);
                old_res->start = 0;
                old_res->end   = 0;
        } else {
                crashk_res.end = ram_res->start - 1;
        }

        crash_free_reserved_phys_range(ram_res->start, ram_res->end);
        insert_resource(&iomem_resource, ram_res);

        return 0;
}

int crash_shrink_memory(unsigned long new_size)
{
        int ret = 0;
        unsigned long old_size, low_size;

        if (!kexec_trylock())
                return -EBUSY;

        if (kexec_crash_image) {
                ret = -ENOENT;
                goto unlock;
        }

        low_size = crash_resource_size(&crashk_low_res);
        old_size = crash_resource_size(&crashk_res) + low_size;
        new_size = roundup(new_size, KEXEC_CRASH_MEM_ALIGN);
        if (new_size >= old_size) {
                ret = (new_size == old_size) ? 0 : -EINVAL;
                goto unlock;
        }

        /*
         * (low_size > new_size) implies that low_size is greater than zero.
         * This also means that if low_size is zero, the else branch is taken.
         *
         * If low_size is greater than 0, (low_size > new_size) indicates that
         * crashk_low_res also needs to be shrunk.  Otherwise, only crashk_res
         * needs to be shrunk.
         */
        if (low_size > new_size) {
                ret = __crash_shrink_memory(&crashk_res, 0);
                if (ret)
                        goto unlock;

                ret = __crash_shrink_memory(&crashk_low_res, new_size);
        } else {
                ret = __crash_shrink_memory(&crashk_res, new_size - low_size);
        }

        /* Swap crashk_res and crashk_low_res if needed */
        if (!crashk_res.end && crashk_low_res.end) {
                crashk_res.start = crashk_low_res.start;
                crashk_res.end   = crashk_low_res.end;
                release_resource(&crashk_low_res);
                crashk_low_res.start = 0;
                crashk_low_res.end   = 0;
                insert_resource(&iomem_resource, &crashk_res);
        }

unlock:
        kexec_unlock();
        return ret;
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
        struct elf_prstatus prstatus;
        u32 *buf;

        if ((cpu < 0) || (cpu >= nr_cpu_ids))
                return;

        /*
         * Using ELF notes here is opportunistic: the prstatus note gives a
         * well defined format for saving the register state of the crashing
         * CPU, and the per-cpu crash_notes buffer is preallocated, so
         * nothing has to be allocated on the crash path.
         */
        buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
        if (!buf)
                return;
        memset(&prstatus, 0, sizeof(prstatus));
        prstatus.common.pr_pid = current->pid;
        elf_core_copy_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                              &prstatus, sizeof(prstatus));
        final_note(buf);
}

/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
        int error = 0;

        if (!kexec_trylock())
                return -EBUSY;
        if (!kexec_image) {
                error = -EINVAL;
                goto Unlock;
        }

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                pm_prepare_console();
                error = freeze_processes();
                if (error) {
                        error = -EBUSY;
                        goto Restore_console;
                }
                suspend_console();
                error = dpm_suspend_start(PMSG_FREEZE);
                if (error)
                        goto Resume_console;
                /*
                 * dpm_suspend_end() must be called after dpm_suspend_start()
                 * to complete the transition, like in the hibernation flows
                 * elsewhere.
                 */
                error = dpm_suspend_end(PMSG_FREEZE);
                if (error)
                        goto Resume_devices;
                error = suspend_disable_secondary_cpus();
                if (error)
                        goto Enable_cpus;
                local_irq_disable();
                error = syscore_suspend();
                if (error)
                        goto Enable_irqs;
        } else
#endif
        {
                kexec_in_progress = true;
                kernel_restart_prepare("kexec reboot");
                migrate_to_reboot_cpu();

                /*
                 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
                 * no further code needs to use CPU hotplug (which is true in
                 * the reboot case).  However, the kexec path depends on using
                 * CPU hotplug again, so re-enable it here.
                 */
                cpu_hotplug_enable();
                pr_notice("Starting new kernel\n");
                machine_shutdown();
        }

        kmsg_dump(KMSG_DUMP_SHUTDOWN);
        machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                syscore_resume();
 Enable_irqs:
                local_irq_enable();
 Enable_cpus:
                suspend_enable_secondary_cpus();
                dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
                dpm_resume_end(PMSG_RESTORE);
 Resume_console:
                resume_console();
                thaw_processes();
 Restore_console:
                pm_restore_console();
        }
#endif

 Unlock:
        kexec_unlock();
        return error;
}