#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/swapfile.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <linux/sched/task.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/microcode.h>
#include <asm/kaslr.h>
#include <asm/hypervisor.h>
#include <asm/cpufeature.h>
#include <asm/pti.h>
#include <asm/text-patching.h>
#include <asm/memtype.h>
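/*
 * We need to define the tracepoints somewhere, and tlb.c
 * is only compiled when SMP is configured, so don't use
 * CREATE_TRACE_POINTS in that file.
 */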
#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>

#include "mm_internal.h"
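/*
 * Tables translating between page_cache_type_t and pte encoding.
 *
 * The default values are defined statically as minimal supported mode;
 * WC and WT fall back to UC-.  pat_init() updates these values to support
 * more cache modes, WC and WT, when it is safe to do so.
 *
 * Index into __cachemode2pte_tbl[] is the cachemode.
 *
 * Index into __pte2cachemode_tbl[] are the caching attribute bits of the
 * pte (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
 */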
static uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB      ]	= 0         | 0        ,
	[_PAGE_CACHE_MODE_WC      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC_MINUS]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC      ]	= _PAGE_PWT | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WT      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WP      ]	= 0         | _PAGE_PCD,
};

unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	if (likely(pcm == 0))
		return 0;
	return __cachemode2pte_tbl[pcm];
}
EXPORT_SYMBOL(cachemode2protval);

static uint8_t __pte2cachemode_tbl[8] = {
	[__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
	[__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
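/* Check that the write-protect PAT entry is set for write-protect */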
bool x86_has_pat_wp(void)
{
	/*
	 * Translate WP to its pte encoding first, then back: indexing
	 * __pte2cachemode_tbl[] directly with the cachemode enum would
	 * look up the wrong slot, since that table is indexed by the
	 * pte cache attribute bits.
	 */
	uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP];

	return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP;
}

enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	unsigned long masked;

	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
	if (likely(masked == 0))
		return 0;
	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}

static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;
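/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen; see the history of
 * x86_init.mapping.pagetable_reserve for details.
 */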
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
	}

	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret = 0;

		if (min_pfn_mapped < max_pfn_mapped) {
			ret = memblock_find_in_range(
					min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE * num, PAGE_SIZE);
		}
		if (ret)
			memblock_reserve(ret, PAGE_SIZE * num);
		else if (can_use_brk_pgt)
			ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));

		if (!ret)
			panic("alloc_low_pages: can not alloc memory");

		pfn = ret >> PAGE_SHIFT;
	} else {
		pfn = pgt_buf_end;
		pgt_buf_end += num;
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}
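/*
 * By default we need enough page-table pages to map the 0..ISA_END_ADDRESS
 * range and the initial PMD_SIZE mapping. With KASLR memory randomization,
 * depending on the machine's e820 layout and PUD alignment, up to twice as
 * many pages may be needed.
 */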
#ifndef CONFIG_X86_5LEVEL
#define INIT_PGD_PAGE_TABLES	3
#else
#define INIT_PGD_PAGE_TABLES	4
#endif

#ifndef CONFIG_RANDOMIZE_MEMORY
#define INIT_PGD_PAGE_COUNT	(2 * INIT_PGD_PAGE_TABLES)
#else
#define INIT_PGD_PAGE_COUNT	(4 * INIT_PGD_PAGE_TABLES)
#endif

#define INIT_PGT_BUF_SIZE	(INIT_PGD_PAGE_COUNT * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
void __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

int after_bootmem;

early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

static int page_size_mask;
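/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. Invoked on the boot CPU.
 */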
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

static void __init probe_page_size_mask(void)
{
	/*
	 * For pagealloc debugging, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small ones in interrupt context, etc.
	 */
	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
		page_size_mask |= 1 << PG_LEVEL_2M;
	else
		direct_gbpages = 0;

	/* Enable PSE if available */
	if (boot_cpu_has(X86_FEATURE_PSE))
		cr4_set_bits_and_update_boot(X86_CR4_PSE);

	/* Enable PGE if available */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	/* By default everything is supported: */
	__default_kernel_pte_mask = __supported_pte_mask;
	/* Except with PTI, where the kernel is mostly non-Global: */
	if (cpu_feature_enabled(X86_FEATURE_PTI))
		__default_kernel_pte_mask &= ~_PAGE_GLOBAL;

	/* Enable 1 GB linear kernel mappings if available: */
	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		printk(KERN_INFO "Using GB pages for direct mapping\n");
		page_size_mask |= 1 << PG_LEVEL_1G;
	} else {
		direct_gbpages = 0;
	}
}

static void setup_pcid(void)
{
	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	if (!boot_cpu_has(X86_FEATURE_PCID))
		return;

	if (boot_cpu_has(X86_FEATURE_PGE)) {
		/*
		 * This can't be cr4_set_bits_and_update_boot() -- the
		 * trampoline code can't handle CR4.PCIDE and it wouldn't
		 * do any good anyway.  Despite the name,
		 * cr4_set_bits_and_update_boot() doesn't actually cause
		 * the bits in question to remain set all the way through
		 * the secondary boot asm.
		 *
		 * Instead, we brute-force it and set CR4.PCIDE manually in
		 * start_secondary().
		 */
		cr4_set_bits(X86_CR4_PCIDE);

		/*
		 * INVPCID's single-context modes (2/3) only work if we set
		 * X86_CR4_PCIDE *and* we have INVPCID support.  They are
		 * unusable on systems that have X86_CR4_PCIDE clear, or
		 * that have no INVPCID support at all.
		 */
		if (boot_cpu_has(X86_FEATURE_INVPCID))
			setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
	} else {
		/*
		 * flush_tlb_all(), as currently implemented, won't work if
		 * PCID is on but PGE is not.  Instead of trying to fix it,
		 * just disable PCID completely.
		 */
		setup_clear_cpu_cap(X86_FEATURE_PCID);
	}
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else
#define NR_RANGE_MR 5
#endif

static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}
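/*
 * Adjust the page_size_mask for small ranges to use big page sizes
 * instead of small ones if the surrounding regions are RAM too.
 */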
static void __ref adjust_range_page_size_mask(struct map_range *mr,
					      int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}

static const char *page_size_string(struct map_range *mr)
{
	static const char str_1g[] = "1G";
	static const char str_2m[] = "2M";
	static const char str_4m[] = "4M";
	static const char str_4k[] = "4k";

	if (mr->page_size_mask & (1<<PG_LEVEL_1G))
		return str_1g;

	/*
	 * 32-bit without PAE has a 4M large page size.
	 * PG_LEVEL_2M is misnamed, but we can at least
	 * print out the right size in the string.
	 */
	if (IS_ENABLED(CONFIG_X86_32) &&
	    !IS_ENABLED(CONFIG_X86_PAE) &&
	    mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_4m;

	if (mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_2m;

	return str_4k;
}

static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn, limit_pfn;
	unsigned long pfn;
	int i;

	limit_pfn = PFN_DOWN(end);

	/* head if not big page alignment ? */
	pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pfn == 0)
		end_pfn = PFN_DOWN(PMD_SIZE);
	else
		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
	if (end_pfn > limit_pfn)
		end_pfn = limit_pfn;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pfn = end_pfn;
	}

	/* big page (2M) range */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask &
				    ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pfn = end_pfn;
	}

	/* tail is not big page (1G) alignment */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#endif

	/* tail is not big page (2M) alignment */
	start_pfn = pfn;
	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);

	/* try to merge ranges that are contiguous and share a page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;

		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		pr_debug(" [mem %#010lx-%#010lx] page %s\n",
			 mr[i].start, mr[i].end - 1,
			 page_size_string(&mr[i]));

	return nr_range;
}
478
479struct range pfn_mapped[E820_MAX_ENTRIES];
480int nr_pfn_mapped;
481
482static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
483{
484 nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_MAX_ENTRIES,
485 nr_pfn_mapped, start_pfn, end_pfn);
486 nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_MAX_ENTRIES);
487
488 max_pfn_mapped = max(max_pfn_mapped, end_pfn);
489
490 if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
491 max_low_pfn_mapped = max(max_low_pfn_mapped,
492 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
493}
494
495bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
496{
497 int i;
498
499 for (i = 0; i < nr_pfn_mapped; i++)
500 if ((start_pfn >= pfn_mapped[i].start) &&
501 (end_pfn <= pfn_mapped[i].end))
502 return true;
503
504 return false;
505}
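/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */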
unsigned long __ref init_memory_mapping(unsigned long start,
					unsigned long end, pgprot_t prot)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
		 start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask,
						   prot);

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}
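/*
 * We need to iterate through the E820 memory map and create direct mappings
 * only for E820_TYPE_RAM and E820_TYPE_RESERVED_KERN regions. We cannot
 * simply create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may possibly result
 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
 *
 * init_mem_mapping() calls init_range_memory_mapping() with a big range.
 * That range may have holes in the middle or at the ends, and only the
 * RAM parts within it get mapped here.
 */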
static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
					   unsigned long r_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);

		if (start >= end)
			continue;

		/*
		 * If it overlaps with the brk pgt buffer, we need to
		 * allocate the pgt buffer from memblock instead.
		 */
		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				  min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
		init_memory_mapping(start, end, PAGE_KERNEL);
		mapped_ram_size += end - start;
		can_use_brk_pgt = true;
	}

	return mapped_ram_size;
}

static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * Initial mapped size is PMD_SIZE (2M).
	 * We can not set step_size to be PUD_SIZE (1G) yet.
	 * In the worst case, when we cross the 1G boundary and
	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
	 * to map a 1G range with PTEs. Hence we use one less than the
	 * difference of page table level shifts.
	 *
	 * Don't need to worry about overflow in the top-down case, on 32bit,
	 * when step_size is 0, round_down() returns 0 for start, and that
	 * turns it into 0x100000000ULL.
	 * In the bottom-up case, round_up(x, 0) returns 0 too, which
	 * needs to be taken into consideration by the code below.
	 */
	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}
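/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in top-down. That said, the page tables
 * will be allocated at the end of the memory, and we map the
 * memory in top-down.
 */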
static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	unsigned long real_end, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;

	/* Xen has a big reserved range near the end of RAM; skip it at first. */
	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get the exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = real_end;

	/*
	 * We start from the top (end of memory) and go to the bottom.
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page tables.
	 */
	while (last_start > map_start) {
		unsigned long start;

		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		mapped_ram_size += init_range_memory_mapping(start,
							     last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}

	if (real_end < map_end)
		init_range_memory_mapping(real_end, map_end);
}
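/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in bottom-up. Since we have limited the
 * bottom-up allocation above the kernel, the page tables will
 * be allocated just above the kernel and we map the memory
 * in [map_start, map_end) in bottom-up.
 */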
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, start;
	unsigned long mapped_ram_size = 0;
	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	unsigned long step_size = PMD_SIZE;

	start = map_start;
	min_pfn_mapped = start >> PAGE_SHIFT;

	/*
	 * We start from the bottom (@map_start) and go to the top (@map_end).
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page tables.
	 */
	while (start < map_end) {
		if (step_size && map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else {
			next = map_end;
		}

		mapped_ram_size += init_range_memory_mapping(start, next);
		start = next;

		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}
}
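/*
 * The real mode trampoline, which is required for bootstrapping CPUs,
 * occupies only a small area under the low 1MB.  See reserve_real_mode()
 * for details.
 *
 * If KASLR is disabled the first PGD entry of the direct mapping is copied
 * to map the real mode trampoline.
 *
 * If KASLR is enabled, copy only the PGD entry of the low memory range
 * used by the real mode trampoline code.
 */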
static void __init init_trampoline(void)
{
#ifdef CONFIG_X86_64
	if (!kaslr_memory_enabled())
		trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
	else
		init_trampoline_kaslr();
#endif
}

void __init init_mem_mapping(void)
{
	unsigned long end;

	pti_check_boottime_disable();
	probe_page_size_mask();
	setup_pcid();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);

	/* Init the trampoline, possibly with KASLR memory offset */
	init_trampoline();

	/*
	 * If the allocation is in bottom-up direction, we setup direct mapping
	 * in bottom-up, otherwise we setup direct mapping in top-down.
	 */
	if (memblock_bottom_up()) {
		unsigned long kernel_end = __pa_symbol(_end);

		/*
		 * We need two separate calls here. This is because we want to
		 * allocate page tables above the kernel. So we first map
		 * [kernel_end, end) to make memory above the kernel be mapped
		 * as soon as possible. And then use page tables allocated above
		 * the kernel to map [ISA_END_ADDRESS, kernel_end).
		 */
		memory_map_bottom_up(kernel_end, end);
		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
	} else {
		memory_map_top_down(ISA_END_ADDRESS, end);
	}

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn ? */
		max_low_pfn = max_pfn;
	}
#else
	early_ioremap_page_table_range_init();
#endif

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();

	x86_init.hyper.init_mem_mapping();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}
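/*
 * Initialize an mm_struct to be used during poking and a pointer to be used
 * during patching.
 */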
void __init poking_init(void)
{
	spinlock_t *ptl;
	pte_t *ptep;

	poking_mm = copy_init_mm();
	BUG_ON(!poking_mm);

	/*
	 * Randomize the poking address, but make sure that the following page
	 * will be mapped at the same PMD. We need 2 pages, so find space for 3,
	 * and adjust the address if the PMD ends after the first one.
	 */
	poking_addr = TASK_UNMAPPED_BASE;
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
			(TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);

	if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
		poking_addr += PAGE_SIZE;

	/*
	 * We need to trigger the allocation of the page-tables that will be
	 * needed for poking now. Later, poking may be performed in an atomic
	 * section, which might cause allocation to fail.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
	BUG_ON(!ptep);
	pte_unmap_unlock(ptep, ptl);
}
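/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area traditionally contains BIOS code and data regions used by X, dosemu,
 * and similar apps. Since they map the entire memory range, the whole range
 * must be allowed (for mapping), but any areas that would otherwise be
 * disallowed are flagged as being "zero filled" instead of rejected.
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */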
int devmem_is_allowed(unsigned long pagenr)
{
	if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
			      IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
			!= REGION_DISJOINT) {
		/*
		 * For disallowed memory regions in the low 1MB range,
		 * request that the page be shown as all zeros.
		 */
		if (pagenr < 256)
			return 2;

		return 0;
	}

	/*
	 * This must follow the RAM test, since System RAM is considered a
	 * restricted resource under CONFIG_STRICT_DEVMEM.
	 */
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
		/* Low 1MB bypasses iomem restrictions. */
		if (pagenr < 256)
			return 1;

		return 0;
	}

	return 1;
}

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	if (debug_pagealloc_enabled()) {
		pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
			begin, end - 1);
		/*
		 * Inform kmemleak about the hole in the memory since the
		 * corresponding pages will be unmapped.
		 */
		kmemleak_free_part((void *)begin, end - begin);
		set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
	} else {
		/*
		 * We just marked the kernel text read only above, now that
		 * we are going to free part of that, we need to make that
		 * writeable and non-executable first.
		 */
		set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
		set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

		free_reserved_area((void *)begin, (void *)end,
				   POISON_FREE_INITMEM, what);
	}
}
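/*
 * begin/end can be in the direct map or the "high kernel mapping"
 * used for the kernel image only.  free_init_pages() will do the
 * right thing for either kind of address.
 */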
void free_kernel_image_pages(const char *what, void *begin, void *end)
{
	unsigned long begin_ul = (unsigned long)begin;
	unsigned long end_ul = (unsigned long)end;
	unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;

	free_init_pages(what, begin_ul, end_ul);

	/*
	 * PTI maps some of the kernel into userspace.  For performance,
	 * this includes some kernel areas that do not contain secrets.
	 * Those areas might be adjacent to the parts of the kernel image
	 * being freed, which may contain secrets.  Remove the "high kernel
	 * image mapping" for these freed areas, ensuring they are not even
	 * potentially vulnerable to Meltdown regardless of the specific
	 * optimizations PTI is currently using.
	 *
	 * The "noalias" prevents unmapping the direct map alias, which is
	 * needed to access the freed pages.
	 *
	 * This is only valid for 64bit kernels. 32bit has only one mapping
	 * which can't be treated in this way for obvious reasons.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
		set_memory_np_noalias(begin_ul, len_pages);
}

void __ref free_initmem(void)
{
	e820__reallocate_tables();

	mem_encrypt_free_decrypted_mem();

	free_kernel_image_pages("unused kernel image (initmem)",
				&__init_begin, &__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end may not be page-aligned, and we cannot align it here: the
	 * decompressor could be confused by an aligned initrd_end. The
	 * trailing partial page was already reserved earlier in
	 *    - i386_start_kernel()
	 *    - x86_64_start_kernel()
	 *    - relocate_initrd()
	 * so PAGE_ALIGN() is safe here and frees that partial page too.
	 */
	free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif
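/*
 * Calculate the number of non-free pages in the first 16 MB of RAM
 * (the DMA zone) and report it via set_dma_reserve(), so that the zone
 * sizing code can subtract it from the zone's managed pages.
 */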
void __init memblock_find_dma_reserve(void)
{
#ifdef CONFIG_X86_64
	u64 nr_pages = 0, nr_free_pages = 0;
	unsigned long start_pfn, end_pfn;
	phys_addr_t start_addr, end_addr;
	int i;
	u64 u;

	/*
	 * Iterate over all memory ranges (free and reserved ones alike),
	 * to calculate the total number of pages in the first 16 MB of RAM:
	 */
	nr_pages = 0;
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		start_pfn = min(start_pfn, MAX_DMA_PFN);
		end_pfn   = min(end_pfn,   MAX_DMA_PFN);

		nr_pages += end_pfn - start_pfn;
	}

	/*
	 * Iterate over free memory ranges to calculate the number of free
	 * pages in the DMA zone, while not counting potential partial
	 * pages at the beginning or the end of the range:
	 */
	nr_free_pages = 0;
	for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
		start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
		end_pfn   = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN);

		if (start_pfn < end_pfn)
			nr_free_pages += end_pfn - start_pfn;
	}

	set_dma_reserve(nr_pages - nr_free_pages);
#endif
}

void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= min(MAX_DMA_PFN, max_low_pfn);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= min(MAX_DMA32_PFN, max_low_pfn);
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init(max_zone_pfns);
}

__visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = {
	.loaded_mm = &init_mm,
	.next_asid = 1,
	.cr4 = ~0UL,	/* fail hard if we screw up cr4 state */
};

void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
	/* entry 0 MUST be WB (hardwired to speed up translations) */
	BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);

	__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
	__pte2cachemode_tbl[entry] = cache;
}

#ifdef CONFIG_SWAP
unsigned long max_swapfile_size(void)
{
	unsigned long pages;

	pages = generic_max_swapfile_size();

	if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
		/* Limit the swap file size to MAX_PA/2 for the L1TF workaround */
		unsigned long long l1tf_limit = l1tf_pfn_limit();
		/*
		 * We encode swap offsets also with 3 bits below those for pfn,
		 * which makes the usable limit higher.
		 */
#if CONFIG_PGTABLE_LEVELS > 2
		l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
#endif
		pages = min_t(unsigned long long, l1tf_limit, pages);
	}
	return pages;
}
#endif