/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the zone free lists and hands out free pages; this is the
 *  zoned buddy allocator.  kmalloc() lives in slab.c.
 */
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/memcontrol.h>
#include <linux/debugobjects.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
 [N_POSSIBLE] = NODE_MASK_ALL,
 [N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
 [N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
 [N_CPU] = { { [0] = 1UL } },
#endif
};
EXPORT_SYMBOL(node_states);
69
70unsigned long totalram_pages __read_mostly;
71unsigned long totalreserve_pages __read_mostly;
72long nr_swap_pages;
73int percpu_pagelist_fraction;
74
75#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
76int pageblock_order __read_mostly;
77#endif
78
static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * Ratios controlling how much memory in each lower zone is kept out of
 * reach of allocations that could also have been satisfied from a
 * higher zone.  A zone's "lowmem reserve" is the number of pages in the
 * zones above it divided by that zone's ratio here, so the default of
 * 256 holds back only 1/256 of the higher zones' memory while a ratio
 * of 32 holds back considerably more.  The ratios are tunable through
 * /proc/sys/vm/lowmem_reserve_ratio.
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
 256,
#endif
#ifdef CONFIG_ZONE_DMA32
 256,
#endif
#ifdef CONFIG_HIGHMEM
 32,
#endif
 32,
};
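
/*
 * Example, assuming the default ratio of 256 for ZONE_DMA: on a machine
 * with 1GiB of memory above ZONE_DMA, an allocation that could have used
 * the higher zones but falls back to ZONE_DMA must leave roughly
 * 1GiB/256 = 4MiB of ZONE_DMA free on top of the usual watermark.  The
 * per-zone amounts are derived from these ratios when the watermarks are
 * set up (setup_per_zone_lowmem_reserve()).
 */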
104
105EXPORT_SYMBOL(totalram_pages);
106
107static char * const zone_names[MAX_NR_ZONES] = {
108#ifdef CONFIG_ZONE_DMA
109 "DMA",
110#endif
111#ifdef CONFIG_ZONE_DMA32
112 "DMA32",
113#endif
114 "Normal",
115#ifdef CONFIG_HIGHMEM
116 "HighMem",
117#endif
118 "Movable",
119};
120
121int min_free_kbytes = 1024;
122
123unsigned long __meminitdata nr_kernel_pages;
124unsigned long __meminitdata nr_all_pages;
125static unsigned long __meminitdata dma_reserve;
126
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 /*
  * MAX_ACTIVE_REGIONS determines the maximum number of distinct ranges
  * of memory (RAM) that may be registered with add_active_range().
  * Ranges passed to add_active_range() are merged where possible, so
  * the limit only needs to scale with the number of nodes and the
  * number of holes in the memory map.
  */
 #ifdef CONFIG_MAX_ACTIVE_REGIONS
  /* The architecture has requested a specific limit */
  #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
 #else
  #if MAX_NUMNODES >= 32
   /* If there can be many nodes, allow up to 50 regions per node */
   #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
  #else
   /* By default, allow up to 256 distinct regions */
   #define MAX_ACTIVE_REGIONS 256
  #endif
 #endif

 static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
 static int __meminitdata nr_nodemap_entries;
 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif
 unsigned long __initdata required_kernelcore;
 static unsigned long __initdata required_movablecore;
 unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 int movable_zone;
 EXPORT_SYMBOL(movable_zone);
#endif
164
165#if MAX_NUMNODES > 1
166int nr_node_ids __read_mostly = MAX_NUMNODES;
167EXPORT_SYMBOL(nr_node_ids);
168#endif
169
170int page_group_by_mobility_disabled __read_mostly;
171
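/*
 * Each pageblock (a run of 2^pageblock_order pages) carries a few bits
 * of state in the zone's pageblock flags bitmap; the helper below stores
 * the block's migratetype (MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, ...) in
 * the PB_migrate..PB_migrate_end bit range.
 */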
172static void set_pageblock_migratetype(struct page *page, int migratetype)
173{
174 set_pageblock_flags_group(page, (unsigned long)migratetype,
175 PB_migrate, PB_migrate_end);
176}
177
178#ifdef CONFIG_DEBUG_VM
179static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
180{
181 int ret = 0;
182 unsigned seq;
183 unsigned long pfn = page_to_pfn(page);
184
185 do {
186 seq = zone_span_seqbegin(zone);
187 if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
188 ret = 1;
189 else if (pfn < zone->zone_start_pfn)
190 ret = 1;
191 } while (zone_span_seqretry(zone, seq));
192
193 return ret;
194}
195
196static int page_is_consistent(struct zone *zone, struct page *page)
197{
198 if (!pfn_valid_within(page_to_pfn(page)))
199 return 0;
200 if (zone != page_zone(page))
201 return 0;
202
203 return 1;
204}
205
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
208static int bad_range(struct zone *zone, struct page *page)
209{
210 if (page_outside_zone_boundaries(zone, page))
211 return 1;
212 if (!page_is_consistent(zone, page))
213 return 1;
214
215 return 0;
216}
217#else
218static inline int bad_range(struct zone *zone, struct page *page)
219{
220 return 0;
221}
222#endif
223
224static void bad_page(struct page *page)
225{
226 void *pc = page_get_page_cgroup(page);
227
228 printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG
229 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
230 current->comm, page, (int)(2*sizeof(unsigned long)),
231 (unsigned long)page->flags, page->mapping,
232 page_mapcount(page), page_count(page));
233 if (pc) {
234 printk(KERN_EMERG "cgroup:%p\n", pc);
235 page_reset_bad_cgroup(page);
236 }
237 printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
238 KERN_EMERG "Backtrace:\n");
239 dump_stack();
240 page->flags &= ~PAGE_FLAGS_CLEAR_WHEN_BAD;
241 set_page_count(page, 0);
242 reset_page_mapcount(page);
243 page->mapping = NULL;
244 add_taint(TAINT_BAD_PAGE);
245}
246
/*
 * Higher-order pages are called "compound pages".  They are structured
 * thusly:
 *
 * The first PAGE_SIZE page is called the "head page"; the remaining
 * pages are "tail pages", and each tail page's ->first_page points back
 * at the head page.
 *
 * The first tail page also records the compound page's destructor and
 * its order of allocation, which means that zero-order pages may not
 * be compound.
 */
262static void free_compound_page(struct page *page)
263{
264 __free_pages_ok(page, compound_order(page));
265}
266
267static void prep_compound_page(struct page *page, unsigned long order)
268{
269 int i;
270 int nr_pages = 1 << order;
271
272 set_compound_page_dtor(page, free_compound_page);
273 set_compound_order(page, order);
274 __SetPageHead(page);
275 for (i = 1; i < nr_pages; i++) {
276 struct page *p = page + i;
277
278 __SetPageTail(p);
279 p->first_page = page;
280 }
281}
282
283static void destroy_compound_page(struct page *page, unsigned long order)
284{
285 int i;
286 int nr_pages = 1 << order;
287
288 if (unlikely(compound_order(page) != order))
289 bad_page(page);
290
291 if (unlikely(!PageHead(page)))
292 bad_page(page);
293 __ClearPageHead(page);
294 for (i = 1; i < nr_pages; i++) {
295 struct page *p = page + i;
296
297 if (unlikely(!PageTail(p) |
298 (p->first_page != page)))
299 bad_page(page);
300 __ClearPageTail(p);
301 }
302}
303
304static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
305{
306 int i;
307
308
309
310
311
312 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
313 for (i = 0; i < (1 << order); i++)
314 clear_highpage(page + i);
315}
316
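/*
 * A page sitting on a buddy free list is marked with PG_buddy and
 * records its order in page_private().  The helpers below are only
 * called with zone->lock held, so non-atomic flag updates suffice.
 */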
317static inline void set_page_order(struct page *page, int order)
318{
319 set_page_private(page, order);
320 __SetPageBuddy(page);
321}
322
323static inline void rmv_page_order(struct page *page)
324{
325 __ClearPageBuddy(page);
326 set_page_private(page, 0);
327}
328
/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy is #8, its order-1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
346static inline struct page *
347__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
348{
349 unsigned long buddy_idx = page_idx ^ (1 << order);
350
351 return page + (buddy_idx - page_idx);
352}
353
354static inline unsigned long
355__find_combined_index(unsigned long page_idx, unsigned int order)
356{
357 return (page_idx & ~(1 << order));
358}
359
/*
 * This function checks whether a page is free and is the buddy
 * we can coalesce a page with.  A page and its buddy can be merged if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * Whether a page is in the buddy system is recorded with PG_buddy;
 * setting, clearing and testing PG_buddy is serialized by zone->lock.
 * A free page's order is recorded in page_private(page).
 */
373static inline int page_is_buddy(struct page *page, struct page *buddy,
374 int order)
375{
376 if (!pfn_valid_within(page_to_pfn(buddy)))
377 return 0;
378
379 if (page_zone_id(page) != page_zone_id(buddy))
380 return 0;
381
382 if (PageBuddy(buddy) && page_order(buddy) == order) {
383 BUG_ON(page_count(buddy) != 0);
384 return 1;
385 }
386 return 0;
387}
388
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped tables
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages which are heads of continuous
 * free runs of length (1 << order) and marked with PG_buddy; a page's
 * order is recorded in page_private(page).
 * If a block is freed and its buddy is also free, the two are coalesced
 * into a block of the next larger order, and so on up the levels.
 */
413static inline void __free_one_page(struct page *page,
414 struct zone *zone, unsigned int order)
415{
416 unsigned long page_idx;
417 int order_size = 1 << order;
418 int migratetype = get_pageblock_migratetype(page);
419
420 if (unlikely(PageCompound(page)))
421 destroy_compound_page(page, order);
422
423 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
424
425 VM_BUG_ON(page_idx & (order_size - 1));
426 VM_BUG_ON(bad_range(zone, page));
427
428 __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
429 while (order < MAX_ORDER-1) {
430 unsigned long combined_idx;
431 struct page *buddy;
432
433 buddy = __page_find_buddy(page, page_idx, order);
434 if (!page_is_buddy(page, buddy, order))
435 break;
436
437 list_del(&buddy->lru);
438 zone->free_area[order].nr_free--;
439 rmv_page_order(buddy);
440 combined_idx = __find_combined_index(page_idx, order);
441 page = page + (combined_idx - page_idx);
442 page_idx = combined_idx;
443 order++;
444 }
445 set_page_order(page, order);
446 list_add(&page->lru,
447 &zone->free_area[order].free_list[migratetype]);
448 zone->free_area[order].nr_free++;
449}
450
451static inline int free_pages_check(struct page *page)
452{
453 if (unlikely(page_mapcount(page) |
454 (page->mapping != NULL) |
455 (page_get_page_cgroup(page) != NULL) |
456 (page_count(page) != 0) |
457 (page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
458 bad_page(page);
459 if (PageDirty(page))
460 __ClearPageDirty(page);
 /*
  * For now, we report if PG_reserved was found set, but do not
  * clear it, and do not free the page: the caller skips the free
  * entirely when this returns true.
  */
466 return PageReserved(page);
467}
468
/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages
 * are pinned" detection logic.
 */
480static void free_pages_bulk(struct zone *zone, int count,
481 struct list_head *list, int order)
482{
483 spin_lock(&zone->lock);
484 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
485 zone->pages_scanned = 0;
486 while (count--) {
487 struct page *page;
488
489 VM_BUG_ON(list_empty(list));
490 page = list_entry(list->prev, struct page, lru);
491
492 list_del(&page->lru);
493 __free_one_page(page, zone, order);
494 }
495 spin_unlock(&zone->lock);
496}
497
498static void free_one_page(struct zone *zone, struct page *page, int order)
499{
500 spin_lock(&zone->lock);
501 zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
502 zone->pages_scanned = 0;
503 __free_one_page(page, zone, order);
504 spin_unlock(&zone->lock);
505}
506
507static void __free_pages_ok(struct page *page, unsigned int order)
508{
509 unsigned long flags;
510 int i;
511 int reserved = 0;
512
513 for (i = 0 ; i < (1 << order) ; ++i)
514 reserved += free_pages_check(page + i);
515 if (reserved)
516 return;
517
518 if (!PageHighMem(page)) {
519 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
520 debug_check_no_obj_freed(page_address(page),
521 PAGE_SIZE << order);
522 }
523 arch_free_page(page, order);
524 kernel_map_pages(page, 1 << order, 0);
525
526 local_irq_save(flags);
527 __count_vm_events(PGFREE, 1 << order);
528 free_one_page(page_zone(page), page, order);
529 local_irq_restore(flags);
530}
531
/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
535void __free_pages_bootmem(struct page *page, unsigned int order)
536{
537 if (order == 0) {
538 __ClearPageReserved(page);
539 set_page_count(page, 0);
540 set_page_refcounted(page);
541 __free_page(page);
542 } else {
543 int loop;
544
545 prefetchw(page);
546 for (loop = 0; loop < BITS_PER_LONG; loop++) {
547 struct page *p = &page[loop];
548
549 if (loop + 1 < BITS_PER_LONG)
550 prefetchw(p + 1);
551 __ClearPageReserved(p);
552 set_page_count(p, 0);
553 }
554
555 set_page_refcounted(page);
556 __free_pages(page, order);
557 }
558}
559
/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing.  Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function.  This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem, and it is also justified by considering the behaviour of
 * a buddy system containing a single large block of memory acted on
 * by a series of small allocations.  This behaviour is a critical
 * factor in sglist merging's success.
 */
575static inline void expand(struct zone *zone, struct page *page,
576 int low, int high, struct free_area *area,
577 int migratetype)
578{
579 unsigned long size = 1 << high;
580
581 while (high > low) {
582 area--;
583 high--;
584 size >>= 1;
585 VM_BUG_ON(bad_range(zone, &page[size]));
586 list_add(&page[size].lru, &area->free_list[migratetype]);
587 area->nr_free++;
588 set_page_order(&page[size], high);
589 }
590}
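
/*
 * Example: if an order-0 page is requested and the smallest available
 * block is order-3 (8 pages), expand() gives back the unused halves as
 * it walks down: pages 4-7 go to the order-2 free list, pages 2-3 to
 * order-1, page 1 to order-0, and page 0 is returned to the caller.
 */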
591
/*
 * This page is about to be returned from the page allocator
 */
595static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
596{
597 if (unlikely(page_mapcount(page) |
598 (page->mapping != NULL) |
599 (page_get_page_cgroup(page) != NULL) |
600 (page_count(page) != 0) |
601 (page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
602 bad_page(page);
603
 /*
  * For now, we report if PG_reserved was found set, but do not
  * clear it, and do not allocate the page: as a safety net.
  */
608 if (PageReserved(page))
609 return 1;
610
611 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_reclaim |
612 1 << PG_referenced | 1 << PG_arch_1 |
613 1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
614 set_page_private(page, 0);
615 set_page_refcounted(page);
616
617 arch_alloc_page(page, order);
618 kernel_map_pages(page, 1 << order, 1);
619
620 if (gfp_flags & __GFP_ZERO)
621 prep_zero_page(page, order, gfp_flags);
622
623 if (order && (gfp_flags & __GFP_COMP))
624 prep_compound_page(page, order);
625
626 return 0;
627}
628
/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
633static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
634 int migratetype)
635{
636 unsigned int current_order;
637 struct free_area * area;
638 struct page *page;
639
640
641 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
642 area = &(zone->free_area[current_order]);
643 if (list_empty(&area->free_list[migratetype]))
644 continue;
645
646 page = list_entry(area->free_list[migratetype].next,
647 struct page, lru);
648 list_del(&page->lru);
649 rmv_page_order(page);
650 area->nr_free--;
651 __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
652 expand(zone, page, order, current_order, area, migratetype);
653 return page;
654 }
655
656 return NULL;
657}
658
/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
664static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
665 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
666 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
667 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
668 [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE },
669};
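
/*
 * For example, when the MIGRATE_UNMOVABLE free lists are empty the
 * allocator tries MIGRATE_RECLAIMABLE blocks, then MIGRATE_MOVABLE,
 * and finally MIGRATE_RESERVE (__rmqueue_fallback() below skips the
 * reserve entry and leaves it to __rmqueue() as the last resort).
 */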
670
/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary.  If alignment is required, use move_freepages_block().
 */
676int move_freepages(struct zone *zone,
677 struct page *start_page, struct page *end_page,
678 int migratetype)
679{
680 struct page *page;
681 unsigned long order;
682 int pages_moved = 0;
683
684#ifndef CONFIG_HOLES_IN_ZONE
 /*
  * page_zone is not safe to call in this context when
  * CONFIG_HOLES_IN_ZONE is set.  This bug check is probably redundant
  * but is used to catch callers that should be using
  * move_freepages_block() instead.
  */
692 BUG_ON(page_zone(start_page) != page_zone(end_page));
693#endif
694
695 for (page = start_page; page <= end_page;) {
696
697 VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
698
699 if (!pfn_valid_within(page_to_pfn(page))) {
700 page++;
701 continue;
702 }
703
704 if (!PageBuddy(page)) {
705 page++;
706 continue;
707 }
708
709 order = page_order(page);
710 list_del(&page->lru);
711 list_add(&page->lru,
712 &zone->free_area[order].free_list[migratetype]);
713 page += 1 << order;
714 pages_moved += 1 << order;
715 }
716
717 return pages_moved;
718}
719
720int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
721{
722 unsigned long start_pfn, end_pfn;
723 struct page *start_page, *end_page;
724
725 start_pfn = page_to_pfn(page);
726 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
727 start_page = pfn_to_page(start_pfn);
728 end_page = start_page + pageblock_nr_pages - 1;
729 end_pfn = start_pfn + pageblock_nr_pages - 1;
730
731
732 if (start_pfn < zone->zone_start_pfn)
733 start_page = page;
734 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
735 return 0;
736
737 return move_freepages(zone, start_page, end_page, migratetype);
738}
739
740
741static struct page *__rmqueue_fallback(struct zone *zone, int order,
742 int start_migratetype)
743{
744 struct free_area * area;
745 int current_order;
746 struct page *page;
747 int migratetype, i;
748
749
750 for (current_order = MAX_ORDER-1; current_order >= order;
751 --current_order) {
752 for (i = 0; i < MIGRATE_TYPES - 1; i++) {
753 migratetype = fallbacks[start_migratetype][i];
754
755
756 if (migratetype == MIGRATE_RESERVE)
757 continue;
758
759 area = &(zone->free_area[current_order]);
760 if (list_empty(&area->free_list[migratetype]))
761 continue;
762
763 page = list_entry(area->free_list[migratetype].next,
764 struct page, lru);
765 area->nr_free--;

 /*
  * If breaking a large block of pages, move all free
  * pages to the preferred allocation list.  If falling
  * back for a reclaimable kernel allocation, be more
  * aggressive about taking ownership of free pages.
  */
773 if (unlikely(current_order >= (pageblock_order >> 1)) ||
774 start_migratetype == MIGRATE_RECLAIMABLE) {
775 unsigned long pages;
776 pages = move_freepages_block(zone, page,
777 start_migratetype);
778
779
780 if (pages >= (1 << (pageblock_order-1)))
781 set_pageblock_migratetype(page,
782 start_migratetype);
783
784 migratetype = start_migratetype;
785 }
786
787
788 list_del(&page->lru);
789 rmv_page_order(page);
790 __mod_zone_page_state(zone, NR_FREE_PAGES,
791 -(1UL << order));
792
793 if (current_order == pageblock_order)
794 set_pageblock_migratetype(page,
795 start_migratetype);
796
797 expand(zone, page, order, current_order, area, migratetype);
798 return page;
799 }
800 }
801
802
803 return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
804}
805
/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
810static struct page *__rmqueue(struct zone *zone, unsigned int order,
811 int migratetype)
812{
813 struct page *page;
814
815 page = __rmqueue_smallest(zone, order, migratetype);
816
817 if (unlikely(!page))
818 page = __rmqueue_fallback(zone, order, migratetype);
819
820 return page;
821}
822
/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
828static int rmqueue_bulk(struct zone *zone, unsigned int order,
829 unsigned long count, struct list_head *list,
830 int migratetype)
831{
832 int i;
833
834 spin_lock(&zone->lock);
835 for (i = 0; i < count; ++i) {
836 struct page *page = __rmqueue(zone, order, migratetype);
837 if (unlikely(page == NULL))
838 break;

 /*
  * Split buddy pages returned by expand() are received here
  * in physical page order.  The page is added to the caller's
  * list and the list head then moves forward.  From the
  * caller's perspective, the linked list is therefore ordered
  * by page number in some conditions, which is useful for IO
  * devices that can merge requests when the physical pages
  * are ordered properly.
  */
849 list_add(&page->lru, list);
850 set_page_private(page, migratetype);
851 list = &page->lru;
852 }
853 spin_unlock(&zone->lock);
854 return i;
855}
856
857#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
866void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
867{
868 unsigned long flags;
869 int to_drain;
870
871 local_irq_save(flags);
872 if (pcp->count >= pcp->batch)
873 to_drain = pcp->batch;
874 else
875 to_drain = pcp->count;
876 free_pages_bulk(zone, to_drain, &pcp->list, 0);
877 pcp->count -= to_drain;
878 local_irq_restore(flags);
879}
880#endif
881
/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor with the
 * thread pinned to it, or a processor that is not online.
 */
889static void drain_pages(unsigned int cpu)
890{
891 unsigned long flags;
892 struct zone *zone;
893
894 for_each_zone(zone) {
895 struct per_cpu_pageset *pset;
896 struct per_cpu_pages *pcp;
897
898 if (!populated_zone(zone))
899 continue;
900
901 pset = zone_pcp(zone, cpu);
902
903 pcp = &pset->pcp;
904 local_irq_save(flags);
905 free_pages_bulk(zone, pcp->count, &pcp->list, 0);
906 pcp->count = 0;
907 local_irq_restore(flags);
908 }
909}
910
/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
914void drain_local_pages(void *arg)
915{
916 drain_pages(smp_processor_id());
917}
918
/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
 */
922void drain_all_pages(void)
923{
924 on_each_cpu(drain_local_pages, NULL, 0, 1);
925}
926
927#ifdef CONFIG_HIBERNATION
928
929void mark_free_pages(struct zone *zone)
930{
931 unsigned long pfn, max_zone_pfn;
932 unsigned long flags;
933 int order, t;
934 struct list_head *curr;
935
936 if (!zone->spanned_pages)
937 return;
938
939 spin_lock_irqsave(&zone->lock, flags);
940
941 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
942 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
943 if (pfn_valid(pfn)) {
944 struct page *page = pfn_to_page(pfn);
945
946 if (!swsusp_page_is_forbidden(page))
947 swsusp_unset_page_free(page);
948 }
949
950 for_each_migratetype_order(order, t) {
951 list_for_each(curr, &zone->free_area[order].free_list[t]) {
952 unsigned long i;
953
954 pfn = page_to_pfn(list_entry(curr, struct page, lru));
955 for (i = 0; i < (1UL << order); i++)
956 swsusp_set_page_free(pfn_to_page(pfn + i));
957 }
958 }
959 spin_unlock_irqrestore(&zone->lock, flags);
960}
961#endif
962
/*
 * Free a 0-order page
 */
966static void free_hot_cold_page(struct page *page, int cold)
967{
968 struct zone *zone = page_zone(page);
969 struct per_cpu_pages *pcp;
970 unsigned long flags;
971
972 if (PageAnon(page))
973 page->mapping = NULL;
974 if (free_pages_check(page))
975 return;
976
977 if (!PageHighMem(page)) {
978 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
979 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
980 }
981 arch_free_page(page, 0);
982 kernel_map_pages(page, 1, 0);
983
984 pcp = &zone_pcp(zone, get_cpu())->pcp;
985 local_irq_save(flags);
986 __count_vm_event(PGFREE);
987 if (cold)
988 list_add_tail(&page->lru, &pcp->list);
989 else
990 list_add(&page->lru, &pcp->list);
991 set_page_private(page, get_pageblock_migratetype(page));
992 pcp->count++;
993 if (pcp->count >= pcp->high) {
994 free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
995 pcp->count -= pcp->batch;
996 }
997 local_irq_restore(flags);
998 put_cpu();
999}
1000
1001void free_hot_page(struct page *page)
1002{
1003 free_hot_cold_page(page, 0);
1004}
1005
1006void free_cold_page(struct page *page)
1007{
1008 free_hot_cold_page(page, 1);
1009}
1010
/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1 << order) sub-pages: page[0..n].
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
1019void split_page(struct page *page, unsigned int order)
1020{
1021 int i;
1022
1023 VM_BUG_ON(PageCompound(page));
1024 VM_BUG_ON(!page_count(page));
1025 for (i = 1; i < (1 << order); i++)
1026 set_page_refcounted(page + i);
1027}
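
/*
 * Example: a caller that did alloc_pages(GFP_KERNEL, 2) but only needs
 * the four pages individually can call split_page(page, 2) and later
 * free each of page[0..3] separately with __free_page().
 */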
1028
/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
1034static struct page *buffered_rmqueue(struct zone *preferred_zone,
1035 struct zone *zone, int order, gfp_t gfp_flags)
1036{
1037 unsigned long flags;
1038 struct page *page;
1039 int cold = !!(gfp_flags & __GFP_COLD);
1040 int cpu;
1041 int migratetype = allocflags_to_migratetype(gfp_flags);
1042
1043again:
1044 cpu = get_cpu();
1045 if (likely(order == 0)) {
1046 struct per_cpu_pages *pcp;
1047
1048 pcp = &zone_pcp(zone, cpu)->pcp;
1049 local_irq_save(flags);
1050 if (!pcp->count) {
1051 pcp->count = rmqueue_bulk(zone, 0,
1052 pcp->batch, &pcp->list, migratetype);
1053 if (unlikely(!pcp->count))
1054 goto failed;
1055 }
1056
1057
1058 if (cold) {
1059 list_for_each_entry_reverse(page, &pcp->list, lru)
1060 if (page_private(page) == migratetype)
1061 break;
1062 } else {
1063 list_for_each_entry(page, &pcp->list, lru)
1064 if (page_private(page) == migratetype)
1065 break;
1066 }
1067
1068
1069 if (unlikely(&page->lru == &pcp->list)) {
1070 pcp->count += rmqueue_bulk(zone, 0,
1071 pcp->batch, &pcp->list, migratetype);
1072 page = list_entry(pcp->list.next, struct page, lru);
1073 }
1074
1075 list_del(&page->lru);
1076 pcp->count--;
1077 } else {
1078 spin_lock_irqsave(&zone->lock, flags);
1079 page = __rmqueue(zone, order, migratetype);
1080 spin_unlock(&zone->lock);
1081 if (!page)
1082 goto failed;
1083 }
1084
1085 __count_zone_vm_events(PGALLOC, zone, 1 << order);
1086 zone_statistics(preferred_zone, zone);
1087 local_irq_restore(flags);
1088 put_cpu();
1089
1090 VM_BUG_ON(bad_range(zone, page));
1091 if (prep_new_page(page, order, gfp_flags))
1092 goto again;
1093 return page;
1094
1095failed:
1096 local_irq_restore(flags);
1097 put_cpu();
1098 return NULL;
1099}
1100
#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */
#define ALLOC_HARDER 0x10 /* try to alloc harder */
#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
1108
1109#ifdef CONFIG_FAIL_PAGE_ALLOC
1110
1111static struct fail_page_alloc_attr {
1112 struct fault_attr attr;
1113
1114 u32 ignore_gfp_highmem;
1115 u32 ignore_gfp_wait;
1116 u32 min_order;
1117
1118#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1119
1120 struct dentry *ignore_gfp_highmem_file;
1121 struct dentry *ignore_gfp_wait_file;
1122 struct dentry *min_order_file;
1123
1124#endif
1125
1126} fail_page_alloc = {
1127 .attr = FAULT_ATTR_INITIALIZER,
1128 .ignore_gfp_wait = 1,
1129 .ignore_gfp_highmem = 1,
1130 .min_order = 1,
1131};
1132
1133static int __init setup_fail_page_alloc(char *str)
1134{
1135 return setup_fault_attr(&fail_page_alloc.attr, str);
1136}
1137__setup("fail_page_alloc=", setup_fail_page_alloc);
1138
1139static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1140{
1141 if (order < fail_page_alloc.min_order)
1142 return 0;
1143 if (gfp_mask & __GFP_NOFAIL)
1144 return 0;
1145 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1146 return 0;
1147 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1148 return 0;
1149
1150 return should_fail(&fail_page_alloc.attr, 1 << order);
1151}
1152
1153#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1154
1155static int __init fail_page_alloc_debugfs(void)
1156{
1157 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1158 struct dentry *dir;
1159 int err;
1160
1161 err = init_fault_attr_dentries(&fail_page_alloc.attr,
1162 "fail_page_alloc");
1163 if (err)
1164 return err;
1165 dir = fail_page_alloc.attr.dentries.dir;
1166
1167 fail_page_alloc.ignore_gfp_wait_file =
1168 debugfs_create_bool("ignore-gfp-wait", mode, dir,
1169 &fail_page_alloc.ignore_gfp_wait);
1170
1171 fail_page_alloc.ignore_gfp_highmem_file =
1172 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1173 &fail_page_alloc.ignore_gfp_highmem);
1174 fail_page_alloc.min_order_file =
1175 debugfs_create_u32("min-order", mode, dir,
1176 &fail_page_alloc.min_order);
1177
1178 if (!fail_page_alloc.ignore_gfp_wait_file ||
1179 !fail_page_alloc.ignore_gfp_highmem_file ||
1180 !fail_page_alloc.min_order_file) {
1181 err = -ENOMEM;
1182 debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1183 debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1184 debugfs_remove(fail_page_alloc.min_order_file);
1185 cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1186 }
1187
1188 return err;
1189}
1190
1191late_initcall(fail_page_alloc_debugfs);
1192
1193#endif
1194
1195#else
1196
1197static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1198{
1199 return 0;
1200}
1201
1202#endif
1203
/*
 * Return 1 if free pages are above 'mark'.  This takes into account the
 * order of the allocation.
 */
1208int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1209 int classzone_idx, int alloc_flags)
1210{
1211
1212 long min = mark;
1213 long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1214 int o;
1215
1216 if (alloc_flags & ALLOC_HIGH)
1217 min -= min / 2;
1218 if (alloc_flags & ALLOC_HARDER)
1219 min -= min / 4;
1220
1221 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1222 return 0;
1223 for (o = 0; o < order; o++) {
1224
1225 free_pages -= z->free_area[o].nr_free << o;
1226
1227
1228 min >>= 1;
1229
1230 if (free_pages <= min)
1231 return 0;
1232 }
1233 return 1;
1234}
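
/*
 * Worked example (illustrative numbers): for an order-2 request with
 * mark = 1024 and no lowmem_reserve, a zone with 1200 free pages passes
 * the base check (1200 - 4 + 1 > 1024).  If 600 of those pages sit in
 * order-0 blocks, the order-0 step leaves 597 pages against a halved
 * mark of 512, so higher-order requests only succeed while enough
 * memory remains free in sufficiently large blocks.
 */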
1235
1236#ifdef CONFIG_NUMA
/*
 * zlc_setup - Set up for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in the last second) found to be nearly full.  This
 * reduces the cache footprint of zonelist scans that would otherwise
 * have to step over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed-in zonelist, return
 * a pointer to the allowed node mask (either the current task's
 * mems_allowed, or node_states[N_HIGH_MEMORY]); if it is not
 * available, do nothing and return NULL.
 *
 * If the fullzones bitmap in the zonelist cache is stale (more than
 * a second since it was last zapped), zap it (clear its bits).
 */
1259static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1260{
1261 struct zonelist_cache *zlc;
1262 nodemask_t *allowednodes;
1263
1264 zlc = zonelist->zlcache_ptr;
1265 if (!zlc)
1266 return NULL;
1267
1268 if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1269 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1270 zlc->last_full_zap = jiffies;
1271 }
1272
1273 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1274 &cpuset_current_mems_allowed :
1275 &node_states[N_HIGH_MEMORY];
1276 return allowednodes;
1277}
1278
/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * whether the zone is worth looking at further for free memory:
 *
 *  1) the zone must not already be marked full in the zonelist_cache
 *     fullzones bitmap, and
 *  2) the zone's node (from the zonelist_cache z_to_n[] mapping) must
 *     be set in the passed-in allowednodes mask.
 *
 * Return true (non-zero) if the zone is worth trying, false otherwise.
 */
1301static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1302 nodemask_t *allowednodes)
1303{
1304 struct zonelist_cache *zlc;
1305 int i;
1306 int n;
1307
1308 zlc = zonelist->zlcache_ptr;
1309 if (!zlc)
1310 return 1;
1311
1312 i = z - zonelist->_zonerefs;
1313 n = zlc->z_to_n[i];
1314
1315
1316 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1317}
1318
/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
 */
1324static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1325{
1326 struct zonelist_cache *zlc;
1327 int i;
1328
1329 zlc = zonelist->zlcache_ptr;
1330 if (!zlc)
1331 return;
1332
1333 i = z - zonelist->_zonerefs;
1334
1335 set_bit(i, zlc->fullzones);
1336}
1337
1338#else
1339
1340static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1341{
1342 return NULL;
1343}
1344
1345static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1346 nodemask_t *allowednodes)
1347{
1348 return 1;
1349}
1350
1351static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1352{
1353}
1354#endif
1355
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
1360static struct page *
1361get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1362 struct zonelist *zonelist, int high_zoneidx, int alloc_flags)
1363{
1364 struct zoneref *z;
1365 struct page *page = NULL;
1366 int classzone_idx;
1367 struct zone *zone, *preferred_zone;
1368 nodemask_t *allowednodes = NULL;
1369 int zlc_active = 0;
1370 int did_zlc_setup = 0;
1371
1372 (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
1373 &preferred_zone);
1374 if (!preferred_zone)
1375 return NULL;
1376
1377 classzone_idx = zone_idx(preferred_zone);
1378
1379zonelist_scan:
1380
1381
1382
1383
1384 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1385 high_zoneidx, nodemask) {
1386 if (NUMA_BUILD && zlc_active &&
1387 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1388 continue;
1389 if ((alloc_flags & ALLOC_CPUSET) &&
1390 !cpuset_zone_allowed_softwall(zone, gfp_mask))
1391 goto try_next_zone;
1392
1393 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1394 unsigned long mark;
1395 if (alloc_flags & ALLOC_WMARK_MIN)
1396 mark = zone->pages_min;
1397 else if (alloc_flags & ALLOC_WMARK_LOW)
1398 mark = zone->pages_low;
1399 else
1400 mark = zone->pages_high;
1401 if (!zone_watermark_ok(zone, order, mark,
1402 classzone_idx, alloc_flags)) {
1403 if (!zone_reclaim_mode ||
1404 !zone_reclaim(zone, gfp_mask, order))
1405 goto this_zone_full;
1406 }
1407 }
1408
1409 page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
1410 if (page)
1411 break;
1412this_zone_full:
1413 if (NUMA_BUILD)
1414 zlc_mark_zone_full(zonelist, z);
1415try_next_zone:
1416 if (NUMA_BUILD && !did_zlc_setup) {
1417
1418 allowednodes = zlc_setup(zonelist, alloc_flags);
1419 zlc_active = 1;
1420 did_zlc_setup = 1;
1421 }
1422 }
1423
1424 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1425
1426 zlc_active = 0;
1427 goto zonelist_scan;
1428 }
1429 return page;
1430}
1431
/*
 * This is the 'heart' of the zoned buddy allocator.
 */
1435static struct page *
1436__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
1437 struct zonelist *zonelist, nodemask_t *nodemask)
1438{
1439 const gfp_t wait = gfp_mask & __GFP_WAIT;
1440 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1441 struct zoneref *z;
1442 struct zone *zone;
1443 struct page *page;
1444 struct reclaim_state reclaim_state;
1445 struct task_struct *p = current;
1446 int do_retry;
1447 int alloc_flags;
1448 unsigned long did_some_progress;
1449 unsigned long pages_reclaimed = 0;
1450
1451 might_sleep_if(wait);
1452
1453 if (should_fail_alloc_page(gfp_mask, order))
1454 return NULL;
1455
1456restart:
1457 z = zonelist->_zonerefs;
1458
1459 if (unlikely(!z->zone)) {
 /*
  * Happens if we have an empty zonelist as a result of
  * GFP_THISNODE being used on a memoryless node
  */
1464 return NULL;
1465 }
1466
1467 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1468 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET);
1469 if (page)
1470 goto got_pg;
1471

 /*
  * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
  * __GFP_NOWARN set) should not cause reclaim, since the subsystem
  * (e.g. slab) using GFP_THISNODE wants to probe a single node
  * cheaply and handle fallback to other nodes itself.
  */
1480 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1481 goto nopage;
1482
1483 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1484 wakeup_kswapd(zone, order);
1485
 /*
  * OK, we're below the kswapd watermark and have kicked background
  * reclaim.  Now things get more complex, so set up alloc_flags
  * according to how we want to proceed.
  *
  * The caller may dip into page reserves a bit more if the caller
  * cannot run direct reclaim, or if the caller has realtime
  * scheduling policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC
  * requests will set both ALLOC_HARDER (!wait) and ALLOC_HIGH
  * (__GFP_HIGH).
  */
1496 alloc_flags = ALLOC_WMARK_MIN;
1497 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
1498 alloc_flags |= ALLOC_HARDER;
1499 if (gfp_mask & __GFP_HIGH)
1500 alloc_flags |= ALLOC_HIGH;
1501 if (wait)
1502 alloc_flags |= ALLOC_CPUSET;
1503
 /*
  * Go through the zonelist again.  Let __GFP_HIGH and allocations
  * coming from realtime tasks dig deeper into the reserves.
  *
  * This is the last chance, in general, before the goto nopage.
  * Ignore cpuset restrictions for GFP_ATOMIC (!wait) rather than
  * fail the allocation.  See also the cpuset_zone_allowed() comment
  * in kernel/cpuset.c.
  */
1512 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1513 high_zoneidx, alloc_flags);
1514 if (page)
1515 goto got_pg;
1516
1517
1518
1519rebalance:
1520 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
1521 && !in_interrupt()) {
1522 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
1523nofail_alloc:
1524
1525 page = get_page_from_freelist(gfp_mask, nodemask, order,
1526 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
1527 if (page)
1528 goto got_pg;
1529 if (gfp_mask & __GFP_NOFAIL) {
1530 congestion_wait(WRITE, HZ/50);
1531 goto nofail_alloc;
1532 }
1533 }
1534 goto nopage;
1535 }
1536
1537
1538 if (!wait)
1539 goto nopage;
1540
1541 cond_resched();
1542
1543
1544 cpuset_memory_pressure_bump();
1545 p->flags |= PF_MEMALLOC;
1546 reclaim_state.reclaimed_slab = 0;
1547 p->reclaim_state = &reclaim_state;
1548
1549 did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
1550
1551 p->reclaim_state = NULL;
1552 p->flags &= ~PF_MEMALLOC;
1553
1554 cond_resched();
1555
1556 if (order != 0)
1557 drain_all_pages();
1558
1559 if (likely(did_some_progress)) {
1560 page = get_page_from_freelist(gfp_mask, nodemask, order,
1561 zonelist, high_zoneidx, alloc_flags);
1562 if (page)
1563 goto got_pg;
1564 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1565 if (!try_set_zone_oom(zonelist, gfp_mask)) {
1566 schedule_timeout_uninterruptible(1);
1567 goto restart;
1568 }

  /*
   * Go through the zonelist yet one more time, keeping a
   * very high watermark here.  This is only to catch a
   * parallel oom killing; we must fail if we're still
   * under heavy pressure.
   */
1576 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1577 order, zonelist, high_zoneidx,
1578 ALLOC_WMARK_HIGH|ALLOC_CPUSET);
1579 if (page) {
1580 clear_zonelist_oom(zonelist, gfp_mask);
1581 goto got_pg;
1582 }
1583
1584
1585 if (order > PAGE_ALLOC_COSTLY_ORDER) {
1586 clear_zonelist_oom(zonelist, gfp_mask);
1587 goto nopage;
1588 }
1589
1590 out_of_memory(zonelist, gfp_mask, order);
1591 clear_zonelist_oom(zonelist, gfp_mask);
1592 goto restart;
1593 }
1594

 /*
  * Don't let big-order allocations loop unless the caller explicitly
  * requests that.  Wait for some write requests to complete, then
  * retry.
  *
  * Requests of order <= PAGE_ALLOC_COSTLY_ORDER are retried
  * indefinitely unless __GFP_NORETRY is set.  For larger orders we
  * retry only while __GFP_REPEAT is set and we have not yet reclaimed
  * at least as many pages as the allocation needs; __GFP_NOFAIL also
  * forces a retry.
  */
1609 pages_reclaimed += did_some_progress;
1610 do_retry = 0;
1611 if (!(gfp_mask & __GFP_NORETRY)) {
1612 if (order <= PAGE_ALLOC_COSTLY_ORDER) {
1613 do_retry = 1;
1614 } else {
1615 if (gfp_mask & __GFP_REPEAT &&
1616 pages_reclaimed < (1 << order))
1617 do_retry = 1;
1618 }
1619 if (gfp_mask & __GFP_NOFAIL)
1620 do_retry = 1;
1621 }
1622 if (do_retry) {
1623 congestion_wait(WRITE, HZ/50);
1624 goto rebalance;
1625 }
1626
1627nopage:
1628 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1629 printk(KERN_WARNING "%s: page allocation failure."
1630 " order:%d, mode:0x%x\n",
1631 p->comm, order, gfp_mask);
1632 dump_stack();
1633 show_mem();
1634 }
1635got_pg:
1636 return page;
1637}
1638
1639struct page *
1640__alloc_pages(gfp_t gfp_mask, unsigned int order,
1641 struct zonelist *zonelist)
1642{
1643 return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
1644}
1645
1646struct page *
1647__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
1648 struct zonelist *zonelist, nodemask_t *nodemask)
1649{
1650 return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
1651}
1652
1653EXPORT_SYMBOL(__alloc_pages);
1654
/*
 * Common helper functions.
 */
1658unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1659{
1660 struct page * page;
1661 page = alloc_pages(gfp_mask, order);
1662 if (!page)
1663 return 0;
1664 return (unsigned long) page_address(page);
1665}
1666
1667EXPORT_SYMBOL(__get_free_pages);
1668
1669unsigned long get_zeroed_page(gfp_t gfp_mask)
1670{
1671 struct page * page;
1672
1673
1674
1675
1676
1677 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1678
1679 page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1680 if (page)
1681 return (unsigned long) page_address(page);
1682 return 0;
1683}
1684
1685EXPORT_SYMBOL(get_zeroed_page);
1686
1687void __pagevec_free(struct pagevec *pvec)
1688{
1689 int i = pagevec_count(pvec);
1690
1691 while (--i >= 0)
1692 free_hot_cold_page(pvec->pages[i], pvec->cold);
1693}
1694
1695void __free_pages(struct page *page, unsigned int order)
1696{
1697 if (put_page_testzero(page)) {
1698 if (order == 0)
1699 free_hot_page(page);
1700 else
1701 __free_pages_ok(page, order);
1702 }
1703}
1704
1705EXPORT_SYMBOL(__free_pages);
1706
1707void free_pages(unsigned long addr, unsigned int order)
1708{
1709 if (addr != 0) {
1710 VM_BUG_ON(!virt_addr_valid((void *)addr));
1711 __free_pages(virt_to_page((void *)addr), order);
1712 }
1713}
1714
1715EXPORT_SYMBOL(free_pages);
1716
1717static unsigned int nr_free_zone_pages(int offset)
1718{
1719 struct zoneref *z;
1720 struct zone *zone;
1721
1722
1723 unsigned int sum = 0;
1724
1725 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1726
1727 for_each_zone_zonelist(zone, z, zonelist, offset) {
1728 unsigned long size = zone->present_pages;
1729 unsigned long high = zone->pages_high;
1730 if (size > high)
1731 sum += size - high;
1732 }
1733
1734 return sum;
1735}
1736
1737
1738
1739
1740unsigned int nr_free_buffer_pages(void)
1741{
1742 return nr_free_zone_pages(gfp_zone(GFP_USER));
1743}
1744EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1745
1746
1747
1748
1749unsigned int nr_free_pagecache_pages(void)
1750{
1751 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1752}
1753
1754static inline void show_node(struct zone *zone)
1755{
1756 if (NUMA_BUILD)
1757 printk("Node %d ", zone_to_nid(zone));
1758}
1759
1760void si_meminfo(struct sysinfo *val)
1761{
1762 val->totalram = totalram_pages;
1763 val->sharedram = 0;
1764 val->freeram = global_page_state(NR_FREE_PAGES);
1765 val->bufferram = nr_blockdev_pages();
1766 val->totalhigh = totalhigh_pages;
1767 val->freehigh = nr_free_highpages();
1768 val->mem_unit = PAGE_SIZE;
1769}
1770
1771EXPORT_SYMBOL(si_meminfo);
1772
1773#ifdef CONFIG_NUMA
1774void si_meminfo_node(struct sysinfo *val, int nid)
1775{
1776 pg_data_t *pgdat = NODE_DATA(nid);
1777
1778 val->totalram = pgdat->node_present_pages;
1779 val->freeram = node_page_state(nid, NR_FREE_PAGES);
1780#ifdef CONFIG_HIGHMEM
1781 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
1782 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
1783 NR_FREE_PAGES);
1784#else
1785 val->totalhigh = 0;
1786 val->freehigh = 0;
1787#endif
1788 val->mem_unit = PAGE_SIZE;
1789}
1790#endif
1791
1792#define K(x) ((x) << (PAGE_SHIFT-10))
1793
/*
 * Show the free area list: per-CPU pageset state, global VM statistics
 * and, for each zone, its watermarks and a per-order breakdown of free
 * memory.
 */
1799void show_free_areas(void)
1800{
1801 int cpu;
1802 struct zone *zone;
1803
1804 for_each_zone(zone) {
1805 if (!populated_zone(zone))
1806 continue;
1807
1808 show_node(zone);
1809 printk("%s per-cpu:\n", zone->name);
1810
1811 for_each_online_cpu(cpu) {
1812 struct per_cpu_pageset *pageset;
1813
1814 pageset = zone_pcp(zone, cpu);
1815
1816 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
1817 cpu, pageset->pcp.high,
1818 pageset->pcp.batch, pageset->pcp.count);
1819 }
1820 }
1821
1822 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
1823 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
1824 global_page_state(NR_ACTIVE),
1825 global_page_state(NR_INACTIVE),
1826 global_page_state(NR_FILE_DIRTY),
1827 global_page_state(NR_WRITEBACK),
1828 global_page_state(NR_UNSTABLE_NFS),
1829 global_page_state(NR_FREE_PAGES),
1830 global_page_state(NR_SLAB_RECLAIMABLE) +
1831 global_page_state(NR_SLAB_UNRECLAIMABLE),
1832 global_page_state(NR_FILE_MAPPED),
1833 global_page_state(NR_PAGETABLE),
1834 global_page_state(NR_BOUNCE));
1835
1836 for_each_zone(zone) {
1837 int i;
1838
1839 if (!populated_zone(zone))
1840 continue;
1841
1842 show_node(zone);
1843 printk("%s"
1844 " free:%lukB"
1845 " min:%lukB"
1846 " low:%lukB"
1847 " high:%lukB"
1848 " active:%lukB"
1849 " inactive:%lukB"
1850 " present:%lukB"
1851 " pages_scanned:%lu"
1852 " all_unreclaimable? %s"
1853 "\n",
1854 zone->name,
1855 K(zone_page_state(zone, NR_FREE_PAGES)),
1856 K(zone->pages_min),
1857 K(zone->pages_low),
1858 K(zone->pages_high),
1859 K(zone_page_state(zone, NR_ACTIVE)),
1860 K(zone_page_state(zone, NR_INACTIVE)),
1861 K(zone->present_pages),
1862 zone->pages_scanned,
1863 (zone_is_all_unreclaimable(zone) ? "yes" : "no")
1864 );
1865 printk("lowmem_reserve[]:");
1866 for (i = 0; i < MAX_NR_ZONES; i++)
1867 printk(" %lu", zone->lowmem_reserve[i]);
1868 printk("\n");
1869 }
1870
1871 for_each_zone(zone) {
1872 unsigned long nr[MAX_ORDER], flags, order, total = 0;
1873
1874 if (!populated_zone(zone))
1875 continue;
1876
1877 show_node(zone);
1878 printk("%s: ", zone->name);
1879
1880 spin_lock_irqsave(&zone->lock, flags);
1881 for (order = 0; order < MAX_ORDER; order++) {
1882 nr[order] = zone->free_area[order].nr_free;
1883 total += nr[order] << order;
1884 }
1885 spin_unlock_irqrestore(&zone->lock, flags);
1886 for (order = 0; order < MAX_ORDER; order++)
1887 printk("%lu*%lukB ", nr[order], K(1UL) << order);
1888 printk("= %lukB\n", K(total));
1889 }
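
 /*
  * The loop above prints one "count*sizekB" entry per order for each
  * zone, e.g. "Normal: 12*4kB 8*8kB 3*16kB ..." followed by "= <n>kB",
  * the zone's total free memory (counts here are only illustrative).
  */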
1890
1891 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
1892
1893 show_swap_cache_info();
1894}
1895
1896static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
1897{
1898 zoneref->zone = zone;
1899 zoneref->zone_idx = zone_idx(zone);
1900}
1901
/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */
1907static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
1908 int nr_zones, enum zone_type zone_type)
1909{
1910 struct zone *zone;
1911
1912 BUG_ON(zone_type >= MAX_NR_ZONES);
1913 zone_type++;
1914
1915 do {
1916 zone_type--;
1917 zone = pgdat->node_zones + zone_type;
1918 if (populated_zone(zone)) {
1919 zoneref_set_zone(zone,
1920 &zonelist->_zonerefs[nr_zones++]);
1921 check_highest_zone(zone_type);
1922 }
1923
1924 } while (zone_type);
1925 return nr_zones;
1926}
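
/*
 * Example: starting from the highest zone index and walking down, a node
 * with populated HighMem, Normal and DMA zones gets its zonerefs appended
 * in the order HighMem, Normal, DMA, so allocations fall back from the
 * highest usable zone towards ZONE_DMA.
 */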
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938#define ZONELIST_ORDER_DEFAULT 0
1939#define ZONELIST_ORDER_NODE 1
1940#define ZONELIST_ORDER_ZONE 2
1941
1942
1943
1944
1945static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
1946static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
1947
1948
1949#ifdef CONFIG_NUMA
1950
1951static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
1952
1953#define NUMA_ZONELIST_ORDER_LEN 16
1954char numa_zonelist_order[16] = "default";
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964static int __parse_numa_zonelist_order(char *s)
1965{
1966 if (*s == 'd' || *s == 'D') {
1967 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
1968 } else if (*s == 'n' || *s == 'N') {
1969 user_zonelist_order = ZONELIST_ORDER_NODE;
1970 } else if (*s == 'z' || *s == 'Z') {
1971 user_zonelist_order = ZONELIST_ORDER_ZONE;
1972 } else {
1973 printk(KERN_WARNING
1974 "Ignoring invalid numa_zonelist_order value: "
1975 "%s\n", s);
1976 return -EINVAL;
1977 }
1978 return 0;
1979}
1980
1981static __init int setup_numa_zonelist_order(char *s)
1982{
1983 if (s)
1984 return __parse_numa_zonelist_order(s);
1985 return 0;
1986}
1987early_param("numa_zonelist_order", setup_numa_zonelist_order);
1988
1989
1990
1991
1992int numa_zonelist_order_handler(ctl_table *table, int write,
1993 struct file *file, void __user *buffer, size_t *length,
1994 loff_t *ppos)
1995{
1996 char saved_string[NUMA_ZONELIST_ORDER_LEN];
1997 int ret;
1998
1999 if (write)
2000 strncpy(saved_string, (char*)table->data,
2001 NUMA_ZONELIST_ORDER_LEN);
2002 ret = proc_dostring(table, write, file, buffer, length, ppos);
2003 if (ret)
2004 return ret;
2005 if (write) {
2006 int oldval = user_zonelist_order;
2007 if (__parse_numa_zonelist_order((char*)table->data)) {
2008
2009
2010
2011 strncpy((char*)table->data, saved_string,
2012 NUMA_ZONELIST_ORDER_LEN);
2013 user_zonelist_order = oldval;
2014 } else if (oldval != user_zonelist_order)
2015 build_all_zonelists();
2016 }
2017 return 0;
2018}
2019
2020
2021#define MAX_NODE_LOAD (num_online_nodes())
2022static int node_load[MAX_NUMNODES];
2023
/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list.  The node should not have appeared
 * already in @node's fallback list, it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and nodes with no CPUs are
 * preferred since they presumably see very little allocation pressure of
 * their own.  Returns -1 if no node is found.
 */
2038static int find_next_best_node(int node, nodemask_t *used_node_mask)
2039{
2040 int n, val;
2041 int min_val = INT_MAX;
2042 int best_node = -1;
2043 node_to_cpumask_ptr(tmp, 0);
2044
2045
2046 if (!node_isset(node, *used_node_mask)) {
2047 node_set(node, *used_node_mask);
2048 return node;
2049 }
2050
2051 for_each_node_state(n, N_HIGH_MEMORY) {
2052
2053
2054 if (node_isset(n, *used_node_mask))
2055 continue;
2056
2057
2058 val = node_distance(node, n);
2059
2060
2061 val += (n < node);
2062
2063
2064 node_to_cpumask_ptr_next(tmp, n);
2065 if (!cpus_empty(*tmp))
2066 val += PENALTY_FOR_NODE_WITH_CPUS;
2067
2068
2069 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2070 val += node_load[n];
2071
2072 if (val < min_val) {
2073 min_val = val;
2074 best_node = n;
2075 }
2076 }
2077
2078 if (best_node >= 0)
2079 node_set(best_node, *used_node_mask);
2080
2081 return best_node;
2082}
2083
/*
 * Build zonelists ordered by node and zones within node.
 * This results in maximum locality (the Normal zone overflows into the
 * local DMA zone, if any) but risks exhausting the DMA zone.
 */
2090static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2091{
2092 int j;
2093 struct zonelist *zonelist;
2094
2095 zonelist = &pgdat->node_zonelists[0];
2096 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2097 ;
2098 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2099 MAX_NR_ZONES - 1);
2100 zonelist->_zonerefs[j].zone = NULL;
2101 zonelist->_zonerefs[j].zone_idx = 0;
2102}
2103
2104
2105
2106
2107static void build_thisnode_zonelists(pg_data_t *pgdat)
2108{
2109 int j;
2110 struct zonelist *zonelist;
2111
2112 zonelist = &pgdat->node_zonelists[1];
2113 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2114 zonelist->_zonerefs[j].zone = NULL;
2115 zonelist->_zonerefs[j].zone_idx = 0;
2116}
2117
/*
 * Build zonelists ordered by zone and nodes within zones.
 * This conserves the DMA zone(s) until all Normal memory is exhausted,
 * but it means we may overflow to a remote node while memory is still
 * available in the local DMA zone.
 */
2124static int node_order[MAX_NUMNODES];
2125
2126static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2127{
2128 int pos, j, node;
2129 int zone_type;
2130 struct zone *z;
2131 struct zonelist *zonelist;
2132
2133 zonelist = &pgdat->node_zonelists[0];
2134 pos = 0;
2135 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2136 for (j = 0; j < nr_nodes; j++) {
2137 node = node_order[j];
2138 z = &NODE_DATA(node)->node_zones[zone_type];
2139 if (populated_zone(z)) {
2140 zoneref_set_zone(z,
2141 &zonelist->_zonerefs[pos++]);
2142 check_highest_zone(zone_type);
2143 }
2144 }
2145 }
2146 zonelist->_zonerefs[pos].zone = NULL;
2147 zonelist->_zonerefs[pos].zone_idx = 0;
2148}
2149
2150static int default_zonelist_order(void)
2151{
2152 int nid, zone_type;
2153 unsigned long low_kmem_size,total_size;
2154 struct zone *z;
2155 int average_size;
2156
2157
2158
2159
2160
2161
2162
2163 low_kmem_size = 0;
2164 total_size = 0;
2165 for_each_online_node(nid) {
2166 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2167 z = &NODE_DATA(nid)->node_zones[zone_type];
2168 if (populated_zone(z)) {
2169 if (zone_type < ZONE_NORMAL)
2170 low_kmem_size += z->present_pages;
2171 total_size += z->present_pages;
2172 }
2173 }
2174 }
2175 if (!low_kmem_size ||
2176 low_kmem_size > total_size/2)
2177 return ZONELIST_ORDER_NODE;
2178
2179
2180
2181
2182
2183 average_size = total_size /
2184 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2185 for_each_online_node(nid) {
2186 low_kmem_size = 0;
2187 total_size = 0;
2188 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2189 z = &NODE_DATA(nid)->node_zones[zone_type];
2190 if (populated_zone(z)) {
2191 if (zone_type < ZONE_NORMAL)
2192 low_kmem_size += z->present_pages;
2193 total_size += z->present_pages;
2194 }
2195 }
2196 if (low_kmem_size &&
2197 total_size > average_size &&
2198 low_kmem_size > total_size * 70/100)
2199 return ZONELIST_ORDER_NODE;
2200 }
2201 return ZONELIST_ORDER_ZONE;
2202}
2203
2204static void set_zonelist_order(void)
2205{
2206 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2207 current_zonelist_order = default_zonelist_order();
2208 else
2209 current_zonelist_order = user_zonelist_order;
2210}
2211
2212static void build_zonelists(pg_data_t *pgdat)
2213{
2214 int j, node, load;
2215 enum zone_type i;
2216 nodemask_t used_mask;
2217 int local_node, prev_node;
2218 struct zonelist *zonelist;
2219 int order = current_zonelist_order;
2220
2221
2222 for (i = 0; i < MAX_ZONELISTS; i++) {
2223 zonelist = pgdat->node_zonelists + i;
2224 zonelist->_zonerefs[0].zone = NULL;
2225 zonelist->_zonerefs[0].zone_idx = 0;
2226 }
2227
2228
2229 local_node = pgdat->node_id;
2230 load = num_online_nodes();
2231 prev_node = local_node;
2232 nodes_clear(used_mask);
2233
2234 memset(node_load, 0, sizeof(node_load));
2235 memset(node_order, 0, sizeof(node_order));
2236 j = 0;
2237
2238 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2239 int distance = node_distance(local_node, node);
2240
2241
2242
2243
2244
2245 if (distance > RECLAIM_DISTANCE)
2246 zone_reclaim_mode = 1;
2247
2248
2249
2250
2251
2252
2253 if (distance != node_distance(local_node, prev_node))
2254 node_load[node] = load;
2255
2256 prev_node = node;
2257 load--;
2258 if (order == ZONELIST_ORDER_NODE)
2259 build_zonelists_in_node_order(pgdat, node);
2260 else
2261 node_order[j++] = node;
2262 }
2263
2264 if (order == ZONELIST_ORDER_ZONE) {
2265
2266 build_zonelists_in_zone_order(pgdat, j);
2267 }
2268
2269 build_thisnode_zonelists(pgdat);
2270}
2271
2272
2273static void build_zonelist_cache(pg_data_t *pgdat)
2274{
2275 struct zonelist *zonelist;
2276 struct zonelist_cache *zlc;
2277 struct zoneref *z;
2278
2279 zonelist = &pgdat->node_zonelists[0];
2280 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2281 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2282 for (z = zonelist->_zonerefs; z->zone; z++)
2283 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2284}
2285
2286
2287#else
2288
2289static void set_zonelist_order(void)
2290{
2291 current_zonelist_order = ZONELIST_ORDER_ZONE;
2292}
2293
2294static void build_zonelists(pg_data_t *pgdat)
2295{
2296 int node, local_node;
2297 enum zone_type j;
2298 struct zonelist *zonelist;
2299
2300 local_node = pgdat->node_id;
2301
2302 zonelist = &pgdat->node_zonelists[0];
2303 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2314 if (!node_online(node))
2315 continue;
2316 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2317 MAX_NR_ZONES - 1);
2318 }
2319 for (node = 0; node < local_node; node++) {
2320 if (!node_online(node))
2321 continue;
2322 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2323 MAX_NR_ZONES - 1);
2324 }
2325
2326 zonelist->_zonerefs[j].zone = NULL;
2327 zonelist->_zonerefs[j].zone_idx = 0;
2328}
2329
2330
2331static void build_zonelist_cache(pg_data_t *pgdat)
2332{
2333 pgdat->node_zonelists[0].zlcache_ptr = NULL;
2334}
2335
2336#endif
2337
2338
2339static int __build_all_zonelists(void *dummy)
2340{
2341 int nid;
2342
2343 for_each_online_node(nid) {
2344 pg_data_t *pgdat = NODE_DATA(nid);
2345
2346 build_zonelists(pgdat);
2347 build_zonelist_cache(pgdat);
2348 }
2349 return 0;
2350}
2351
2352void build_all_zonelists(void)
2353{
2354 set_zonelist_order();
2355
2356 if (system_state == SYSTEM_BOOTING) {
2357 __build_all_zonelists(NULL);
2358 cpuset_init_current_mems_allowed();
2359 } else {
2360
2361
2362 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
2363
2364 }
2365 vm_total_pages = nr_free_pagecache_pages();
2366
2367
2368
2369
2370
2371
2372
2373 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2374 page_group_by_mobility_disabled = 1;
2375 else
2376 page_group_by_mobility_disabled = 0;
2377
2378 printk("Built %i zonelists in %s order, mobility grouping %s. "
2379 "Total pages: %ld\n",
2380 num_online_nodes(),
2381 zonelist_order_name[current_zonelist_order],
2382 page_group_by_mobility_disabled ? "off" : "on",
2383 vm_total_pages);
2384#ifdef CONFIG_NUMA
2385 printk("Policy zone: %s\n", zone_names[policy_zone]);
2386#endif
2387}
2388
/*
 * Helper functions to size the waitqueue hash table.
 * Essentially these want to choose hash table sizes sufficiently
 * large so that collisions trying to wait on pages are rare.
 * But in fact, the number of active page waitqueues on typical
 * systems is ridiculously low, less than 200, so this is even
 * conservative, even though it seems large.
 *
 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
 * waitqueues, i.e. the size of the waitq table given the number of pages.
 */
2400#define PAGES_PER_WAITQUEUE 256
2401
2402#ifndef CONFIG_MEMORY_HOTPLUG
2403static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2404{
2405 unsigned long size = 1;
2406
2407 pages /= PAGES_PER_WAITQUEUE;
2408
2409 while (size < pages)
2410 size <<= 1;
2411
2412
2413
2414
2415
2416
2417 size = min(size, 4096UL);
2418
2419 return max(size, 4UL);
2420}
2421#else
/*
 * A zone's size might be changed by hot-add, so it is not possible to
 * determine a suitable size for its wait_table up front.  So we just use
 * the maximum size now (4096 entries); with the formula above a zone
 * would need more than about half a million pages (2GiB with 4KiB pages)
 * to reach that cap anyway.
 */
2439static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2440{
2441 return 4096UL;
2442}
2443#endif
2444
/*
 * The wait table size is always a power of two, so this returns its
 * base-2 logarithm; the page waitqueue hash uses this number of bits.
 */
2450static inline unsigned long wait_table_bits(unsigned long size)
2451{
2452 return ffz(~size);
2453}
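
/*
 * Example: a wait table of 4096 entries (the maximum used above) gives
 * wait_table_bits() == 12, i.e. log2 of the table size.
 */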
2454
2455#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2456
/*
 * Mark a number of pageblocks as MIGRATE_RESERVE.  The number of blocks
 * is derived from zone->pages_min, so the reserve scales with
 * min_free_kbytes.  These blocks back the MIGRATE_RESERVE fallback in
 * __rmqueue_smallest() and keep some contiguous, rarely-stolen free
 * memory around for high-priority allocations.
 */
2464static void setup_zone_migrate_reserve(struct zone *zone)
2465{
2466 unsigned long start_pfn, pfn, end_pfn;
2467 struct page *page;
2468 unsigned long reserve, block_migratetype;
2469
2470
2471 start_pfn = zone->zone_start_pfn;
2472 end_pfn = start_pfn + zone->spanned_pages;
2473 reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
2474 pageblock_order;
2475
2476 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2477 if (!pfn_valid(pfn))
2478 continue;
2479 page = pfn_to_page(pfn);
2480
2481
2482 if (page_to_nid(page) != zone_to_nid(zone))
2483 continue;
2484
2485
2486 if (PageReserved(page))
2487 continue;
2488
2489 block_migratetype = get_pageblock_migratetype(page);
2490
2491
2492 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2493 reserve--;
2494 continue;
2495 }
2496
2497
2498 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2499 set_pageblock_migratetype(page, MIGRATE_RESERVE);
2500 move_freepages_block(zone, page, MIGRATE_RESERVE);
2501 reserve--;
2502 continue;
2503 }
2504
2505
2506
2507
2508
2509 if (block_migratetype == MIGRATE_RESERVE) {
2510 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2511 move_freepages_block(zone, page, MIGRATE_MOVABLE);
2512 }
2513 }
2514}
2515
/*
 * Initially all pages are reserved - free ones are freed
 * up by free_all_bootmem() once the early boot process is
 * done.  Non-atomic initialization, single-pass.
 */
2521void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2522 unsigned long start_pfn, enum memmap_context context)
2523{
2524 struct page *page;
2525 unsigned long end_pfn = start_pfn + size;
2526 unsigned long pfn;
2527 struct zone *z;
2528
2529 z = &NODE_DATA(nid)->node_zones[zone];
2530 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2531
2532
2533
2534
2535
2536 if (context == MEMMAP_EARLY) {
2537 if (!early_pfn_valid(pfn))
2538 continue;
2539 if (!early_pfn_in_nid(pfn, nid))
2540 continue;
2541 }
2542 page = pfn_to_page(pfn);
2543 set_page_links(page, zone, nid, pfn);
2544 init_page_count(page);
2545 reset_page_mapcount(page);
2546 SetPageReserved(page);
2547
 /*
  * Mark the block movable so that blocks are reserved for
  * movable allocations at startup.  This forces kernel
  * allocations to reserve their blocks rather than leaking
  * throughout the address space during boot, when many
  * long-lived kernel allocations are made.  Later, some
  * blocks near the start of the zone are marked
  * MIGRATE_RESERVE by setup_zone_migrate_reserve().
  *
  * The pageblock bitmap only covers the zone's valid pfn
  * range, but the memmap can extend past it for alignment,
  * so check that the pfn is inside the zone before calling
  * set_pageblock_migratetype().
  */
2561 if ((z->zone_start_pfn <= pfn)
2562 && (pfn < z->zone_start_pfn + z->spanned_pages)
2563 && !(pfn & (pageblock_nr_pages - 1)))
2564 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2565
2566 INIT_LIST_HEAD(&page->lru);
2567#ifdef WANT_PAGE_VIRTUAL
2568
2569 if (!is_highmem_idx(zone))
2570 set_page_address(page, __va(pfn << PAGE_SHIFT));
2571#endif
2572 }
2573}
2574
2575static void __meminit zone_init_free_lists(struct zone *zone)
2576{
2577 int order, t;
2578 for_each_migratetype_order(order, t) {
2579 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2580 zone->free_area[order].nr_free = 0;
2581 }
2582}
2583
2584#ifndef __HAVE_ARCH_MEMMAP_INIT
2585#define memmap_init(size, nid, zone, start_pfn) \
2586 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
2587#endif
2588
2589static int zone_batchsize(struct zone *zone)
2590{
2591 int batch;
2592
 /*
  * The per-cpu-pages pools are set to around 1000th of the
  * size of the zone.  But no more than 1/2 of a meg.
  *
  * OK, so we don't know how big the cache is.  So guess.
  */
2599 batch = zone->present_pages / 1024;
2600 if (batch * PAGE_SIZE > 512 * 1024)
2601 batch = (512 * 1024) / PAGE_SIZE;
2602 batch /= 4;
2603 if (batch < 1)
2604 batch = 1;
2605
 /*
  * Clamp the batch to a 2^n - 1 value.  Having a power
  * of 2 value was found to be more likely to have
  * suboptimal cache aliasing properties in some cases.
  *
  * For example, if 2 tasks are alternately allocating
  * batches of pages, one task can end up with a lot
  * of pages of one half of the possible page colors
  * and the other with pages of the other colors.
  */
2616 batch = (1 << (fls(batch + batch/2)-1)) - 1;
2617
2618 return batch;
2619}
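
/*
 * Example, assuming 4KiB pages: a 1GiB zone has 262144 pages, so the
 * initial estimate is 256 pages; that exceeds the 512KiB cap and is
 * clamped to 128, quartered to 32, and then rounded to the nearby
 * 2^n - 1 value, giving a per-CPU batch of 31 pages.
 */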
2620
2621inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2622{
2623 struct per_cpu_pages *pcp;
2624
2625 memset(p, 0, sizeof(*p));
2626
2627 pcp = &p->pcp;
2628 pcp->count = 0;
2629 pcp->high = 6 * batch;
2630 pcp->batch = max(1UL, 1 * batch);
2631 INIT_LIST_HEAD(&pcp->list);
2632}
2633
2634
2635
2636
2637
2638
2639static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2640 unsigned long high)
2641{
2642 struct per_cpu_pages *pcp;
2643
2644 pcp = &p->pcp;
2645 pcp->high = high;
2646 pcp->batch = max(1UL, high/4);
2647 if ((high/4) > (PAGE_SHIFT * 8))
2648 pcp->batch = PAGE_SHIFT * 8;
2649}
2650
2651
2652#ifdef CONFIG_NUMA
/*
 * Boot pageset table.  One per cpu which is going to be used for all
 * zones and all nodes.  The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list.  This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones; they do play a role for bootstrapping
 * hotplugged processors.
 */
2670static struct per_cpu_pageset boot_pageset[NR_CPUS];
2671
/*
 * Dynamically allocate memory for the
 * per cpu pageset array in struct zone.
 */
2676static int __cpuinit process_zones(int cpu)
2677{
2678 struct zone *zone, *dzone;
2679 int node = cpu_to_node(cpu);
2680
2681 node_set_state(node, N_CPU);
2682
2683 for_each_zone(zone) {
2684
2685 if (!populated_zone(zone))
2686 continue;
2687
2688 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
2689 GFP_KERNEL, node);
2690 if (!zone_pcp(zone, cpu))
2691 goto bad;
2692
2693 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
2694
2695 if (percpu_pagelist_fraction)
2696 setup_pagelist_highmark(zone_pcp(zone, cpu),
2697 (zone->present_pages / percpu_pagelist_fraction));
2698 }
2699
2700 return 0;
2701bad:
2702 for_each_zone(dzone) {
2703 if (!populated_zone(dzone))
2704 continue;
2705 if (dzone == zone)
2706 break;
2707 kfree(zone_pcp(dzone, cpu));
2708 zone_pcp(dzone, cpu) = NULL;
2709 }
2710 return -ENOMEM;
2711}
2712
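/*
 * Undo process_zones(): free the per-cpu pagesets that were kmalloc'd for
 * this cpu. The statically allocated boot pagesets are left alone.
 */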
2713static inline void free_zone_pagesets(int cpu)
2714{
2715 struct zone *zone;
2716
2717 for_each_zone(zone) {
2718 struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
2719
/* Free per_cpu_pageset only if it is slab allocated */
2721 if (pset != &boot_pageset[cpu])
2722 kfree(pset);
2723 zone_pcp(zone, cpu) = NULL;
2724 }
2725}
2726
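/*
 * CPU hotplug callback: allocate the per-cpu pagesets when a cpu is being
 * brought up, and free them again if bring-up is cancelled or the cpu dies.
 */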
2727static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
2728 unsigned long action,
2729 void *hcpu)
2730{
2731 int cpu = (long)hcpu;
2732 int ret = NOTIFY_OK;
2733
2734 switch (action) {
2735 case CPU_UP_PREPARE:
2736 case CPU_UP_PREPARE_FROZEN:
2737 if (process_zones(cpu))
2738 ret = NOTIFY_BAD;
2739 break;
2740 case CPU_UP_CANCELED:
2741 case CPU_UP_CANCELED_FROZEN:
2742 case CPU_DEAD:
2743 case CPU_DEAD_FROZEN:
2744 free_zone_pagesets(cpu);
2745 break;
2746 default:
2747 break;
2748 }
2749 return ret;
2750}
2751
2752static struct notifier_block __cpuinitdata pageset_notifier =
2753 { &pageset_cpuup_callback, NULL, 0 };
2754
2755void __init setup_per_cpu_pageset(void)
2756{
2757 int err;
2758
/*
 * Initialize the per_cpu_pagesets for the boot cpu here; the cpu-up
 * notifier registered below will do the same for each cpu as it
 * comes online.
 */
2763 err = process_zones(smp_processor_id());
2764 BUG_ON(err);
2765 register_cpu_notifier(&pageset_notifier);
2766}
2767
2768#endif
2769
2770static noinline __init_refok
2771int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
2772{
2773 int i;
2774 struct pglist_data *pgdat = zone->zone_pgdat;
2775 size_t alloc_size;
2776
/*
 * The per-page waitqueue mechanism uses hashed waitqueues
 * per zone.
 */
2781 zone->wait_table_hash_nr_entries =
2782 wait_table_hash_nr_entries(zone_size_pages);
2783 zone->wait_table_bits =
2784 wait_table_bits(zone->wait_table_hash_nr_entries);
2785 alloc_size = zone->wait_table_hash_nr_entries
2786 * sizeof(wait_queue_head_t);
2787
2788 if (!slab_is_available()) {
2789 zone->wait_table = (wait_queue_head_t *)
2790 alloc_bootmem_node(pgdat, alloc_size);
2791 } else {
2792
/*
 * This path is taken when a zone that was empty at boot gains
 * memory via hot-add, so bootmem is no longer available and the
 * table must come from vmalloc. Note that if a whole new node was
 * hot-added, vmalloc cannot yet place this table on that node's
 * own memory; doing so would need further work.
 */
2802 zone->wait_table = vmalloc(alloc_size);
2803 }
2804 if (!zone->wait_table)
2805 return -ENOMEM;
2806
2807 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
2808 init_waitqueue_head(zone->wait_table + i);
2809
2810 return 0;
2811}
2812
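/*
 * Point each cpu's pageset for this zone at a usable structure. On NUMA the
 * real pagesets are allocated later by process_zones(), so the static
 * boot_pageset is used until the slab allocator is available.
 */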
2813static __meminit void zone_pcp_init(struct zone *zone)
2814{
2815 int cpu;
2816 unsigned long batch = zone_batchsize(zone);
2817
2818 for (cpu = 0; cpu < NR_CPUS; cpu++) {
2819#ifdef CONFIG_NUMA
/* Early boot: the slab allocator is not available yet */
2821 zone_pcp(zone, cpu) = &boot_pageset[cpu];
2822 setup_pageset(&boot_pageset[cpu],0);
2823#else
2824 setup_pageset(zone_pcp(zone,cpu), batch);
2825#endif
2826 }
2827 if (zone->present_pages)
2828 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
2829 zone->name, zone->present_pages, batch);
2830}
2831
2832__meminit int init_currently_empty_zone(struct zone *zone,
2833 unsigned long zone_start_pfn,
2834 unsigned long size,
2835 enum memmap_context context)
2836{
2837 struct pglist_data *pgdat = zone->zone_pgdat;
2838 int ret;
2839 ret = zone_wait_table_init(zone, size);
2840 if (ret)
2841 return ret;
2842 pgdat->nr_zones = zone_idx(zone) + 1;
2843
2844 zone->zone_start_pfn = zone_start_pfn;
2845
2846 zone_init_free_lists(zone);
2847
2848 return 0;
2849}
2850
2851#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
 * Basic iterator support. Return the first range of PFNs for a node.
 * Note: nid == MAX_NUMNODES returns the first region regardless of node.
 */
2856static int __meminit first_active_region_index_in_nid(int nid)
2857{
2858 int i;
2859
2860 for (i = 0; i < nr_nodemap_entries; i++)
2861 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
2862 return i;
2863
2864 return -1;
2865}
2866
/*
 * Basic iterator support. Return the next active range of PFNs for a node.
 * Note: nid == MAX_NUMNODES returns the next region regardless of node.
 */
2871static int __meminit next_active_region_index_in_nid(int index, int nid)
2872{
2873 for (index = index + 1; index < nr_nodemap_entries; index++)
2874 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
2875 return index;
2876
2877 return -1;
2878}
2879
2880#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 * Architectures may implement their own version, but if add_active_range()
 * was used and there are no special requirements, this is a convenient
 * alternative.
 */
2887int __meminit early_pfn_to_nid(unsigned long pfn)
2888{
2889 int i;
2890
2891 for (i = 0; i < nr_nodemap_entries; i++) {
2892 unsigned long start_pfn = early_node_map[i].start_pfn;
2893 unsigned long end_pfn = early_node_map[i].end_pfn;
2894
2895 if (start_pfn <= pfn && pfn < end_pfn)
2896 return early_node_map[i].nid;
2897 }
2898
2899 return 0;
2900}
2901#endif
2902
2903
2904#define for_each_active_range_index_in_nid(i, nid) \
2905 for (i = first_active_region_index_in_nid(nid); i != -1; \
2906 i = next_active_region_index_in_nid(i, nid))
2907
/**
 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_range() contain no holes and may be freed, this
 * function may be used instead of calling free_bootmem() manually.
 */
2917void __init free_bootmem_with_active_regions(int nid,
2918 unsigned long max_low_pfn)
2919{
2920 int i;
2921
2922 for_each_active_range_index_in_nid(i, nid) {
2923 unsigned long size_pages = 0;
2924 unsigned long end_pfn = early_node_map[i].end_pfn;
2925
2926 if (early_node_map[i].start_pfn >= max_low_pfn)
2927 continue;
2928
2929 if (end_pfn > max_low_pfn)
2930 end_pfn = max_low_pfn;
2931
2932 size_pages = end_pfn - early_node_map[i].start_pfn;
2933 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
2934 PFN_PHYS(early_node_map[i].start_pfn),
2935 size_pages << PAGE_SHIFT);
2936 }
2937}
2938
2939
/**
 * sparse_memory_present_with_active_regions - Call memory_present for each active range
 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_range() contain no holes, this function may be used instead of
 * calling memory_present() manually.
 */
2947void __init sparse_memory_present_with_active_regions(int nid)
2948{
2949 int i;
2950
2951 for_each_active_range_index_in_nid(i, nid)
2952 memory_present(early_node_map[i].nid,
2953 early_node_map[i].start_pfn,
2954 early_node_map[i].end_pfn);
2955}
2956
2957
/*
 * With reserve-based memory hot-add (CONFIG_MEMORY_HOTPLUG_RESERVE), an
 * architecture such as x86_64 may report, via SRAT, physical ranges that
 * are not usable yet but are expected to be hot-added later.
 * push_node_boundaries() lets the arch record those wider node boundaries
 * and account_node_boundary() expands a node's PFN range to cover them so
 * the memmap is sized for the eventual hot-added memory. Without that
 * config option both helpers are no-ops.
 */
2969#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
2970void __init push_node_boundaries(unsigned int nid,
2971 unsigned long start_pfn, unsigned long end_pfn)
2972{
2973 printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
2974 nid, start_pfn, end_pfn);
2975
/* Initialise the boundary for this node if necessary */
2977 if (node_boundary_end_pfn[nid] == 0)
2978 node_boundary_start_pfn[nid] = -1UL;
2979
2980
2981 if (node_boundary_start_pfn[nid] > start_pfn)
2982 node_boundary_start_pfn[nid] = start_pfn;
2983 if (node_boundary_end_pfn[nid] < end_pfn)
2984 node_boundary_end_pfn[nid] = end_pfn;
2985}
2986
2987
2988static void __meminit account_node_boundary(unsigned int nid,
2989 unsigned long *start_pfn, unsigned long *end_pfn)
2990{
2991 printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
2992 nid, *start_pfn, *end_pfn);
2993
/* Return if boundary information has not been provided */
2995 if (node_boundary_end_pfn[nid] == 0)
2996 return;
2997
2998
2999 if (node_boundary_start_pfn[nid] < *start_pfn)
3000 *start_pfn = node_boundary_start_pfn[nid];
3001 if (node_boundary_end_pfn[nid] > *end_pfn)
3002 *end_pfn = node_boundary_end_pfn[nid];
3003}
3004#else
3005void __init push_node_boundaries(unsigned int nid,
3006 unsigned long start_pfn, unsigned long end_pfn) {}
3007
3008static void __meminit account_node_boundary(unsigned int nid,
3009 unsigned long *start_pfn, unsigned long *end_pfn) {}
3010#endif
3011
3012
/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frames of a node based on information
 * provided by an arch calling add_active_range(). If called for a node
 * with no available memory, the start and end PFNs will both be 0.
 */
3024void __meminit get_pfn_range_for_nid(unsigned int nid,
3025 unsigned long *start_pfn, unsigned long *end_pfn)
3026{
3027 int i;
3028 *start_pfn = -1UL;
3029 *end_pfn = 0;
3030
3031 for_each_active_range_index_in_nid(i, nid) {
3032 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3033 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3034 }
3035
3036 if (*start_pfn == -1UL)
3037 *start_pfn = 0;
3038
3039
3040 account_node_boundary(nid, start_pfn, end_pfn);
3041}
3042
3043
/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonically
 * increasing memory addresses so that the "highest" populated zone is used.
 */
3048void __init find_usable_zone_for_movable(void)
3049{
3050 int zone_index;
3051 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3052 if (zone_index == ZONE_MOVABLE)
3053 continue;
3054
3055 if (arch_zone_highest_possible_pfn[zone_index] >
3056 arch_zone_lowest_possible_pfn[zone_index])
3057 break;
3058 }
3059
3060 VM_BUG_ON(zone_index == -1);
3061 movable_zone = zone_index;
3062}
3063
3064
/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independently of the architecture. Unlike the other
 * zones, the starting point for ZONE_MOVABLE is not fixed: it may differ
 * for each node depending on the size of each node and how evenly
 * kernelcore is distributed. This helper adjusts the zone ranges provided
 * by the architecture for a given node by using the end of the highest
 * usable zone for ZONE_MOVABLE, preserving the assumption that zones
 * within a node are in order of monotonically increasing memory addresses.
 */
3074void __meminit adjust_zone_range_for_zone_movable(int nid,
3075 unsigned long zone_type,
3076 unsigned long node_start_pfn,
3077 unsigned long node_end_pfn,
3078 unsigned long *zone_start_pfn,
3079 unsigned long *zone_end_pfn)
3080{
3081
3082 if (zone_movable_pfn[nid]) {
3083
3084 if (zone_type == ZONE_MOVABLE) {
3085 *zone_start_pfn = zone_movable_pfn[nid];
3086 *zone_end_pfn = min(node_end_pfn,
3087 arch_zone_highest_possible_pfn[movable_zone]);
3088
3089
3090 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3091 *zone_end_pfn > zone_movable_pfn[nid]) {
3092 *zone_end_pfn = zone_movable_pfn[nid];
3093
3094
3095 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
3096 *zone_start_pfn = *zone_end_pfn;
3097 }
3098}
3099
/*
 * Return the number of pages a zone spans in a node, including holes:
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
3104static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3105 unsigned long zone_type,
3106 unsigned long *ignored)
3107{
3108 unsigned long node_start_pfn, node_end_pfn;
3109 unsigned long zone_start_pfn, zone_end_pfn;
3110
3111
3112 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3113 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3114 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3115 adjust_zone_range_for_zone_movable(nid, zone_type,
3116 node_start_pfn, node_end_pfn,
3117 &zone_start_pfn, &zone_end_pfn);
3118
3119
3120 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3121 return 0;
3122
3123
3124 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3125 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3126
3127
3128 return zone_end_pfn - zone_start_pfn;
3129}
3130
/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
3135unsigned long __meminit __absent_pages_in_range(int nid,
3136 unsigned long range_start_pfn,
3137 unsigned long range_end_pfn)
3138{
3139 int i = 0;
3140 unsigned long prev_end_pfn = 0, hole_pages = 0;
3141 unsigned long start_pfn;
3142
/* Find the end_pfn of the first active range of pfns in the node */
3144 i = first_active_region_index_in_nid(nid);
3145 if (i == -1)
3146 return 0;
3147
3148 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3149
/* Account for ranges before physical memory on this node */
3151 if (early_node_map[i].start_pfn > range_start_pfn)
3152 hole_pages = prev_end_pfn - range_start_pfn;
3153
/* Find all holes for the range within the node */
3155 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3156
3157
3158 if (prev_end_pfn >= range_end_pfn)
3159 break;
3160
3161
3162 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3163 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3164
/* Update the hole size count and move on */
3166 if (start_pfn > range_start_pfn) {
3167 BUG_ON(prev_end_pfn > start_pfn);
3168 hole_pages += start_pfn - prev_end_pfn;
3169 }
3170 prev_end_pfn = early_node_map[i].end_pfn;
3171 }
3172
/* Account for ranges past physical memory on this node */
3174 if (range_end_pfn > prev_end_pfn)
3175 hole_pages += range_end_pfn -
3176 max(range_start_pfn, prev_end_pfn);
3177
3178 return hole_pages;
3179}
3180
3181
/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
 */
3188unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3189 unsigned long end_pfn)
3190{
3191 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3192}
3193
/* Return the number of page frames in holes in a zone on a node */
3195static unsigned long __meminit zone_absent_pages_in_node(int nid,
3196 unsigned long zone_type,
3197 unsigned long *ignored)
3198{
3199 unsigned long node_start_pfn, node_end_pfn;
3200 unsigned long zone_start_pfn, zone_end_pfn;
3201
3202 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3203 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3204 node_start_pfn);
3205 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3206 node_end_pfn);
3207
3208 adjust_zone_range_for_zone_movable(nid, zone_type,
3209 node_start_pfn, node_end_pfn,
3210 &zone_start_pfn, &zone_end_pfn);
3211 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3212}
3213
3214#else
3215static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3216 unsigned long zone_type,
3217 unsigned long *zones_size)
3218{
3219 return zones_size[zone_type];
3220}
3221
3222static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3223 unsigned long zone_type,
3224 unsigned long *zholes_size)
3225{
3226 if (!zholes_size)
3227 return 0;
3228
3229 return zholes_size[zone_type];
3230}
3231
3232#endif
3233
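/*
 * Sum up the spanned and present (spanned minus holes) pages of every zone
 * in this node and record the totals in the pglist_data.
 */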
3234static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3235 unsigned long *zones_size, unsigned long *zholes_size)
3236{
3237 unsigned long realtotalpages, totalpages = 0;
3238 enum zone_type i;
3239
3240 for (i = 0; i < MAX_NR_ZONES; i++)
3241 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3242 zones_size);
3243 pgdat->node_spanned_pages = totalpages;
3244
3245 realtotalpages = totalpages;
3246 for (i = 0; i < MAX_NR_ZONES; i++)
3247 realtotalpages -=
3248 zone_absent_pages_in_node(pgdat->node_id, i,
3249 zholes_size);
3250 pgdat->node_present_pages = realtotalpages;
3251 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3252 realtotalpages);
3253}
3254
3255#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->pageblock_flags usemap in bytes.
 * Start by making sure zonesize is a multiple of pageblock_order by
 * rounding up, then use NR_PAGEBLOCK_BITS worth of bits per pageblock,
 * round the result up to the nearest unsigned long in bits, and finally
 * return it in bytes.
 */
3263static unsigned long __init usemap_size(unsigned long zonesize)
3264{
3265 unsigned long usemapsize;
3266
3267 usemapsize = roundup(zonesize, pageblock_nr_pages);
3268 usemapsize = usemapsize >> pageblock_order;
3269 usemapsize *= NR_PAGEBLOCK_BITS;
3270 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3271
3272 return usemapsize / 8;
3273}
3274
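/*
 * Allocate and zero the pageblock flags bitmap for a zone from bootmem.
 * With SPARSEMEM the bitmap lives in the mem_section instead, so the stub
 * below does nothing.
 */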
3275static void __init setup_usemap(struct pglist_data *pgdat,
3276 struct zone *zone, unsigned long zonesize)
3277{
3278 unsigned long usemapsize = usemap_size(zonesize);
3279 zone->pageblock_flags = NULL;
3280 if (usemapsize) {
3281 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3282 memset(zone->pageblock_flags, 0, usemapsize);
3283 }
3284}
3285#else
3286static void inline setup_usemap(struct pglist_data *pgdat,
3287 struct zone *zone, unsigned long zonesize) {}
3288#endif
3289
3290#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Return a sensible default order for the pageblock size. */
3293static inline int pageblock_default_order(void)
3294{
3295 if (HPAGE_SHIFT > PAGE_SHIFT)
3296 return HUGETLB_PAGE_ORDER;
3297
3298 return MAX_ORDER-1;
3299}
3300
3301
3302static inline void __init set_pageblock_order(unsigned int order)
3303{
3304
3305 if (pageblock_order)
3306 return;
3307
/*
 * Assume the largest contiguous order of interest is a huge page.
 * This value may be variable depending on boot parameters on IA64.
 */
3312 pageblock_order = order;
3313}
3314#else

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * and pageblock_default_order() are unused, as pageblock_order is set
 * at compile time. See include/linux/pageblock-flags.h for the values of
 * pageblock_order based on the kernel config.
 */
3322static inline int pageblock_default_order(unsigned int order)
3323{
3324 return MAX_ORDER-1;
3325}
3326#define set_pageblock_order(x) do {} while (0)
3327
3328#endif
3329
3330
/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
3336static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3337 unsigned long *zones_size, unsigned long *zholes_size)
3338{
3339 enum zone_type j;
3340 int nid = pgdat->node_id;
3341 unsigned long zone_start_pfn = pgdat->node_start_pfn;
3342 int ret;
3343
3344 pgdat_resize_init(pgdat);
3345 pgdat->nr_zones = 0;
3346 init_waitqueue_head(&pgdat->kswapd_wait);
3347 pgdat->kswapd_max_order = 0;
3348
3349 for (j = 0; j < MAX_NR_ZONES; j++) {
3350 struct zone *zone = pgdat->node_zones + j;
3351 unsigned long size, realsize, memmap_pages;
3352
3353 size = zone_spanned_pages_in_node(nid, j, zones_size);
3354 realsize = size - zone_absent_pages_in_node(nid, j,
3355 zholes_size);
3356
/*
 * Adjust realsize so that it accounts for how much memory
 * is used by this zone for its memmap. This affects the watermark
 * and per-cpu initialisations below.
 */
3362 memmap_pages =
3363 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3364 if (realsize >= memmap_pages) {
3365 realsize -= memmap_pages;
3366 printk(KERN_DEBUG
3367 " %s zone: %lu pages used for memmap\n",
3368 zone_names[j], memmap_pages);
3369 } else
3370 printk(KERN_WARNING
3371 " %s zone: %lu pages exceeds realsize %lu\n",
3372 zone_names[j], memmap_pages, realsize);
3373
/* Account for the dma_reserve pages reserved in the first zone */
3375 if (j == 0 && realsize > dma_reserve) {
3376 realsize -= dma_reserve;
3377 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
3378 zone_names[0], dma_reserve);
3379 }
3380
3381 if (!is_highmem_idx(j))
3382 nr_kernel_pages += realsize;
3383 nr_all_pages += realsize;
3384
3385 zone->spanned_pages = size;
3386 zone->present_pages = realsize;
3387#ifdef CONFIG_NUMA
3388 zone->node = nid;
3389 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3390 / 100;
3391 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3392#endif
3393 zone->name = zone_names[j];
3394 spin_lock_init(&zone->lock);
3395 spin_lock_init(&zone->lru_lock);
3396 zone_seqlock_init(zone);
3397 zone->zone_pgdat = pgdat;
3398
3399 zone->prev_priority = DEF_PRIORITY;
3400
3401 zone_pcp_init(zone);
3402 INIT_LIST_HEAD(&zone->active_list);
3403 INIT_LIST_HEAD(&zone->inactive_list);
3404 zone->nr_scan_active = 0;
3405 zone->nr_scan_inactive = 0;
3406 zap_zone_vm_stats(zone);
3407 zone->flags = 0;
3408 if (!size)
3409 continue;
3410
3411 set_pageblock_order(pageblock_default_order());
3412 setup_usemap(pgdat, zone, size);
3413 ret = init_currently_empty_zone(zone, zone_start_pfn,
3414 size, MEMMAP_EARLY);
3415 BUG_ON(ret);
3416 memmap_init(size, nid, j, zone_start_pfn);
3417 zone_start_pfn += size;
3418 }
3419}
3420
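/*
 * Allocate the flat node_mem_map for this node (unless the architecture
 * has already provided one), padded out to MAX_ORDER alignment for the
 * buddy allocator.
 */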
3421static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3422{
/* Skip empty nodes */
3424 if (!pgdat->node_spanned_pages)
3425 return;
3426
3427#ifdef CONFIG_FLAT_NODE_MEM_MAP
3428
3429 if (!pgdat->node_mem_map) {
3430 unsigned long size, start, end;
3431 struct page *map;
3432
/*
 * The zone's endpoints aren't required to be MAX_ORDER
 * aligned, but the node_mem_map endpoints must be, in order
 * for the buddy allocator to function correctly.
 */
3438 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3439 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3440 end = ALIGN(end, MAX_ORDER_NR_PAGES);
3441 size = (end - start) * sizeof(struct page);
3442 map = alloc_remap(pgdat->node_id, size);
3443 if (!map)
3444 map = alloc_bootmem_node(pgdat, size);
3445 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3446 }
3447#ifndef CONFIG_NEED_MULTIPLE_NODES
/*
 * With no DISCONTIG, the global mem_map is just set as node 0's.
 */
3451 if (pgdat == NODE_DATA(0)) {
3452 mem_map = NODE_DATA(0)->node_mem_map;
3453#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3454 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3455 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3456#endif
3457 }
3458#endif
3459#endif
3460}
3461
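/*
 * Top-level per-node initialisation: record the node id and start pfn,
 * work out the page totals, allocate the node's mem_map and set up its
 * zones.
 */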
3462void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
3463 unsigned long *zones_size, unsigned long node_start_pfn,
3464 unsigned long *zholes_size)
3465{
3466 pgdat->node_id = nid;
3467 pgdat->node_start_pfn = node_start_pfn;
3468 calculate_node_totalpages(pgdat, zones_size, zholes_size);
3469
3470 alloc_node_mem_map(pgdat);
3471
3472 free_area_init_core(pgdat, zones_size, zholes_size);
3473}
3474
3475#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3476
3477#if MAX_NUMNODES > 1
/*
 * Figure out the number of possible node ids.
 */
3481static void __init setup_nr_node_ids(void)
3482{
3483 unsigned int node;
3484 unsigned int highest = 0;
3485
3486 for_each_node_mask(node, node_possible_map)
3487 highest = node;
3488 nr_node_ids = highest + 1;
3489}
3490#else
3491static inline void setup_nr_node_ids(void)
3492{
3493}
3494#endif
3495
3496
/**
 * add_active_range - Register a range of PFNs backed by physical memory
 * @nid: The node ID the range resides on
 * @start_pfn: The start PFN of the available physical memory
 * @end_pfn: The end PFN of the available physical memory
 *
 * These ranges are stored in an early_node_map[] and later used by
 * free_area_init_nodes() to calculate zone sizes and holes. If the
 * range spans a memory hole, it is up to the architecture to ensure
 * the memory is not freed by the bootmem allocator. If possible
 * the range being registered will be merged with existing ranges.
 */
3508void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3509 unsigned long end_pfn)
3510{
3511 int i;
3512
3513 printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
3514 "%d entries of %d used\n",
3515 nid, start_pfn, end_pfn,
3516 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3517
/* Merge with existing active regions if possible */
3519 for (i = 0; i < nr_nodemap_entries; i++) {
3520 if (early_node_map[i].nid != nid)
3521 continue;
3522
/* Skip if an existing region covers this new one */
3524 if (start_pfn >= early_node_map[i].start_pfn &&
3525 end_pfn <= early_node_map[i].end_pfn)
3526 return;
3527
/* Merge forward if the new range extends an existing one */
3529 if (start_pfn <= early_node_map[i].end_pfn &&
3530 end_pfn > early_node_map[i].end_pfn) {
3531 early_node_map[i].end_pfn = end_pfn;
3532 return;
3533 }
3534
/* Merge backward if the new range precedes an existing one */
3536 if (start_pfn < early_node_map[i].end_pfn &&
3537 end_pfn >= early_node_map[i].start_pfn) {
3538 early_node_map[i].start_pfn = start_pfn;
3539 return;
3540 }
3541 }
3542
/* Check that early_node_map is large enough before adding a new entry */
3544 if (i >= MAX_ACTIVE_REGIONS) {
3545 printk(KERN_CRIT "More than %d memory regions, truncating\n",
3546 MAX_ACTIVE_REGIONS);
3547 return;
3548 }
3549
3550 early_node_map[i].nid = nid;
3551 early_node_map[i].start_pfn = start_pfn;
3552 early_node_map[i].end_pfn = end_pfn;
3553 nr_nodemap_entries = i + 1;
3554}
3555
3556
/**
 * shrink_active_range - Shrink an existing registered range of PFNs
 * @nid: The node id the range is on that should be shrunk
 * @old_end_pfn: The old end PFN of the range
 * @new_end_pfn: The new end PFN of the range
 *
 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local
 * node, near the end of a physical range that has already been registered
 * with add_active_range(). This function allows an arch to shrink such an
 * existing registered range.
 */
3567void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
3568 unsigned long new_end_pfn)
3569{
3570 int i;
3571
3572
3573 for_each_active_range_index_in_nid(i, nid)
3574 if (early_node_map[i].end_pfn == old_end_pfn) {
3575 early_node_map[i].end_pfn = new_end_pfn;
3576 break;
3577 }
3578}
3579
3580
/**
 * remove_all_active_ranges - Remove all currently registered regions
 *
 * During discovery, it may be found that a table like SRAT is invalid
 * and an alternative discovery method must be used. This function removes
 * all currently registered regions.
 */
3587void __init remove_all_active_ranges(void)
3588{
3589 memset(early_node_map, 0, sizeof(early_node_map));
3590 nr_nodemap_entries = 0;
3591#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
3592 memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
3593 memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
3594#endif
3595}
3596
3597
3598static int __init cmp_node_active_region(const void *a, const void *b)
3599{
3600 struct node_active_region *arange = (struct node_active_region *)a;
3601 struct node_active_region *brange = (struct node_active_region *)b;
3602
3603
3604 if (arange->start_pfn > brange->start_pfn)
3605 return 1;
3606 if (arange->start_pfn < brange->start_pfn)
3607 return -1;
3608
3609 return 0;
3610}
3611
3612
3613static void __init sort_node_map(void)
3614{
3615 sort(early_node_map, (size_t)nr_nodemap_entries,
3616 sizeof(struct node_active_region),
3617 cmp_node_active_region, NULL);
3618}
3619
3620
3621unsigned long __init find_min_pfn_for_node(unsigned long nid)
3622{
3623 int i;
3624 unsigned long min_pfn = ULONG_MAX;
3625
3626
3627 for_each_active_range_index_in_nid(i, nid)
3628 min_pfn = min(min_pfn, early_node_map[i].start_pfn);
3629
3630 if (min_pfn == ULONG_MAX) {
3631 printk(KERN_WARNING
3632 "Could not find start_pfn for node %lu\n", nid);
3633 return 0;
3634 }
3635
3636 return min_pfn;
3637}
3638
/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * It returns the minimum PFN based on information provided via
 * add_active_range().
 */
3645unsigned long __init find_min_pfn_with_active_regions(void)
3646{
3647 return find_min_pfn_for_node(MAX_NUMNODES);
3648}
3649
/**
 * find_max_pfn_with_active_regions - Find the maximum PFN registered
 *
 * It returns the maximum PFN based on information provided via
 * add_active_range().
 */
3656unsigned long __init find_max_pfn_with_active_regions(void)
3657{
3658 int i;
3659 unsigned long max_pfn = 0;
3660
3661 for (i = 0; i < nr_nodemap_entries; i++)
3662 max_pfn = max(max_pfn, early_node_map[i].end_pfn);
3663
3664 return max_pfn;
3665}
3666
/*
 * early_calculate_totalpages()
 * Sum pages in active regions for the ZONE_MOVABLE calculation and
 * populate N_HIGH_MEMORY for calculating usable_nodes.
 */
3672static unsigned long __init early_calculate_totalpages(void)
3673{
3674 int i;
3675 unsigned long totalpages = 0;
3676
3677 for (i = 0; i < nr_nodemap_entries; i++) {
3678 unsigned long pages = early_node_map[i].end_pfn -
3679 early_node_map[i].start_pfn;
3680 totalpages += pages;
3681 if (pages)
3682 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3683 }
3684 return totalpages;
3685}
3686
/*
 * Find the PFN the Movable zone begins at in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others.
 */
3693void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3694{
3695 int i, nid;
3696 unsigned long usable_startpfn;
3697 unsigned long kernelcore_node, kernelcore_remaining;
3698 unsigned long totalpages = early_calculate_totalpages();
3699 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
3700
/*
 * If movablecore was specified, calculate the implied amount of
 * kernelcore (total pages minus movablecore) so that memory usable
 * for any allocation type is evenly spread. If both kernelcore and
 * movablecore are specified, the larger of kernelcore and the
 * implied value is used for required_kernelcore.
 */
3709 if (required_movablecore) {
3710 unsigned long corepages;
3711
/*
 * Round up so that ZONE_MOVABLE is at least as large as what
 * was requested by the user.
 */
3716 required_movablecore =
3717 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3718 corepages = totalpages - required_movablecore;
3719
3720 required_kernelcore = max(required_kernelcore, corepages);
3721 }
3722
/* If kernelcore was not specified or implied, there is no ZONE_MOVABLE */
3724 if (!required_kernelcore)
3725 return;
3726
/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
3728 find_usable_zone_for_movable();
3729 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
3730
3731restart:
/* Spread kernelcore memory as evenly as possible throughout the nodes */
3733 kernelcore_node = required_kernelcore / usable_nodes;
3734 for_each_node_state(nid, N_HIGH_MEMORY) {
3735
3736
3737
3738
3739
3740 if (required_kernelcore < kernelcore_node)
3741 kernelcore_node = required_kernelcore / usable_nodes;
3742
/*
 * As the map is walked, we track how much memory is usable
 * by the kernel using kernelcore_remaining. When it is
 * 0, the rest of the node is usable by ZONE_MOVABLE.
 */
3748 kernelcore_remaining = kernelcore_node;
3749
3750
3751 for_each_active_range_index_in_nid(i, nid) {
3752 unsigned long start_pfn, end_pfn;
3753 unsigned long size_pages;
3754
3755 start_pfn = max(early_node_map[i].start_pfn,
3756 zone_movable_pfn[nid]);
3757 end_pfn = early_node_map[i].end_pfn;
3758 if (start_pfn >= end_pfn)
3759 continue;
3760
/* Account for what is only usable for kernelcore */
3762 if (start_pfn < usable_startpfn) {
3763 unsigned long kernel_pages;
3764 kernel_pages = min(end_pfn, usable_startpfn)
3765 - start_pfn;
3766
3767 kernelcore_remaining -= min(kernel_pages,
3768 kernelcore_remaining);
3769 required_kernelcore -= min(kernel_pages,
3770 required_kernelcore);
3771
/* Continue if the range is now fully accounted for */
3773 if (end_pfn <= usable_startpfn) {
/*
 * Push zone_movable_pfn to the end of the range so
 * that if we have to rebalance kernelcore across
 * nodes, we do not double-account these pages.
 */
3781 zone_movable_pfn[nid] = end_pfn;
3782 continue;
3783 }
3784 start_pfn = usable_startpfn;
3785 }
3786
3787
3788
3789
3790
3791
3792 size_pages = end_pfn - start_pfn;
3793 if (size_pages > kernelcore_remaining)
3794 size_pages = kernelcore_remaining;
3795 zone_movable_pfn[nid] = start_pfn + size_pages;
3796
3797
3798
3799
3800
3801
3802 required_kernelcore -= min(required_kernelcore,
3803 size_pages);
3804 kernelcore_remaining -= size_pages;
3805 if (!kernelcore_remaining)
3806 break;
3807 }
3808 }
3809

/*
 * If there is still required_kernelcore, we do another pass with one
 * less node in the count. This will push zone_movable_pfn[nid] further
 * along on the nodes that still have memory until kernelcore is
 * satisfied.
 */
3816 usable_nodes--;
3817 if (usable_nodes && required_kernelcore > usable_nodes)
3818 goto restart;
3819
/* Align the start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
3821 for (nid = 0; nid < MAX_NUMNODES; nid++)
3822 zone_movable_pfn[nid] =
3823 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
3824}
3825
3826
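/* Any regular (non-highmem) memory on that node? If so, mark it N_NORMAL_MEMORY. */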
3827static void check_for_regular_memory(pg_data_t *pgdat)
3828{
3829#ifdef CONFIG_HIGHMEM
3830 enum zone_type zone_type;
3831
3832 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
3833 struct zone *zone = &pgdat->node_zones[zone_type];
3834 if (zone->present_pages)
3835 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
3836 }
3837#endif
3838}
3839
3840
/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by add_active_range(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFN
 * between two adjacent zones match, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended, e.g. ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
3853void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3854{
3855 unsigned long nid;
3856 enum zone_type i;
3857
/* Sort early_node_map as initialisation assumes it is sorted */
3859 sort_node_map();
3860
3861
3862 memset(arch_zone_lowest_possible_pfn, 0,
3863 sizeof(arch_zone_lowest_possible_pfn));
3864 memset(arch_zone_highest_possible_pfn, 0,
3865 sizeof(arch_zone_highest_possible_pfn));
3866 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
3867 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
3868 for (i = 1; i < MAX_NR_ZONES; i++) {
3869 if (i == ZONE_MOVABLE)
3870 continue;
3871 arch_zone_lowest_possible_pfn[i] =
3872 arch_zone_highest_possible_pfn[i-1];
3873 arch_zone_highest_possible_pfn[i] =
3874 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
3875 }
3876 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
3877 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
3878
/* Find the PFNs that ZONE_MOVABLE begins at in each node */
3880 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
3881 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
3882
3883
3884 printk("Zone PFN ranges:\n");
3885 for (i = 0; i < MAX_NR_ZONES; i++) {
3886 if (i == ZONE_MOVABLE)
3887 continue;
3888 printk(" %-8s %8lu -> %8lu\n",
3889 zone_names[i],
3890 arch_zone_lowest_possible_pfn[i],
3891 arch_zone_highest_possible_pfn[i]);
3892 }
3893
3894
3895 printk("Movable zone start PFN for each node\n");
3896 for (i = 0; i < MAX_NUMNODES; i++) {
3897 if (zone_movable_pfn[i])
3898 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
3899 }
3900
3901
3902 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
3903 for (i = 0; i < nr_nodemap_entries; i++)
3904 printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid,
3905 early_node_map[i].start_pfn,
3906 early_node_map[i].end_pfn);
3907
/* Initialise every node */
3909 setup_nr_node_ids();
3910 for_each_online_node(nid) {
3911 pg_data_t *pgdat = NODE_DATA(nid);
3912 free_area_init_node(nid, pgdat, NULL,
3913 find_min_pfn_for_node(nid), NULL);
3914
3915
3916 if (pgdat->node_present_pages)
3917 node_set_state(nid, N_HIGH_MEMORY);
3918 check_for_regular_memory(pgdat);
3919 }
3920}
3921
3922static int __init cmdline_parse_core(char *p, unsigned long *core)
3923{
3924 unsigned long long coremem;
3925 if (!p)
3926 return -EINVAL;
3927
3928 coremem = memparse(p, &p);
3929 *core = coremem >> PAGE_SHIFT;
3930
/* Paranoid check that UL is enough for the coremem value */
3932 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
3933
3934 return 0;
3935}
3936
/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
3941static int __init cmdline_parse_kernelcore(char *p)
3942{
3943 return cmdline_parse_core(p, &required_kernelcore);
3944}
3945
/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
3950static int __init cmdline_parse_movablecore(char *p)
3951{
3952 return cmdline_parse_core(p, &required_movablecore);
3953}
3954
3955early_param("kernelcore", cmdline_parse_kernelcore);
3956early_param("movablecore", cmdline_parse_movablecore);
3957
3958#endif
3959
/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by present_pages.
 * In the DMA zone, a significant percentage may be consumed by the kernel
 * image and other unfreeable allocations, which can skew the watermarks
 * badly. This function may optionally be used to account for unfreeable
 * pages in the first zone (e.g., ZONE_DMA). The effect will be lower
 * watermarks and a smaller per-cpu batchsize.
 */
3971void __init set_dma_reserve(unsigned long new_dma_reserve)
3972{
3973 dma_reserve = new_dma_reserve;
3974}
3975
3976#ifndef CONFIG_NEED_MULTIPLE_NODES
3977static bootmem_data_t contig_bootmem_data;
3978struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
3979
3980EXPORT_SYMBOL(contig_page_data);
3981#endif
3982
3983void __init free_area_init(unsigned long *zones_size)
3984{
3985 free_area_init_node(0, NODE_DATA(0), zones_size,
3986 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
3987}
3988
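/*
 * CPU hotplug callback: when a cpu dies, drain its per-cpu page lists back
 * to the buddy lists and fold its vm event/stat counters into the
 * surviving cpus so the counters stay consistent.
 */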
3989static int page_alloc_cpu_notify(struct notifier_block *self,
3990 unsigned long action, void *hcpu)
3991{
3992 int cpu = (unsigned long)hcpu;
3993
3994 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
3995 drain_pages(cpu);
3996

/*
 * Spill the event counters of the dead processor
 * into the current processor's event counters.
 * This artificially elevates the count of the current
 * processor.
 */
4003 vm_events_fold_cpu(cpu);
4004

/*
 * Zero the differential counters of the dead processor
 * so that the vm statistics are consistent.
 *
 * This is only okay since the processor is dead and cannot
 * race with what we are doing.
 */
4012 refresh_cpu_vm_stats(cpu);
4013 }
4014 return NOTIFY_OK;
4015}
4016
4017void __init page_alloc_init(void)
4018{
4019 hotcpu_notifier(page_alloc_cpu_notify, 0);
4020}
4021
4022
/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 * or min_free_kbytes changes.
 */
4026static void calculate_totalreserve_pages(void)
4027{
4028 struct pglist_data *pgdat;
4029 unsigned long reserve_pages = 0;
4030 enum zone_type i, j;
4031
4032 for_each_online_pgdat(pgdat) {
4033 for (i = 0; i < MAX_NR_ZONES; i++) {
4034 struct zone *zone = pgdat->node_zones + i;
4035 unsigned long max = 0;
4036
4037
4038 for (j = i; j < MAX_NR_ZONES; j++) {
4039 if (zone->lowmem_reserve[j] > max)
4040 max = zone->lowmem_reserve[j];
4041 }
4042
/* we treat pages_high as reserved pages. */
4044 max += zone->pages_high;
4045
4046 if (max > zone->present_pages)
4047 max = zone->present_pages;
4048 reserve_pages += max;
4049 }
4050 }
4051 totalreserve_pages = reserve_pages;
4052}
4053
4054
/*
 * setup_per_zone_lowmem_reserve - called whenever
 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
 * has a correct pages reserved value, so an adequate number of
 * pages are left in the zone after a successful __alloc_pages().
 */
4060static void setup_per_zone_lowmem_reserve(void)
4061{
4062 struct pglist_data *pgdat;
4063 enum zone_type j, idx;
4064
4065 for_each_online_pgdat(pgdat) {
4066 for (j = 0; j < MAX_NR_ZONES; j++) {
4067 struct zone *zone = pgdat->node_zones + j;
4068 unsigned long present_pages = zone->present_pages;
4069
4070 zone->lowmem_reserve[j] = 0;
4071
4072 idx = j;
4073 while (idx) {
4074 struct zone *lower_zone;
4075
4076 idx--;
4077
4078 if (sysctl_lowmem_reserve_ratio[idx] < 1)
4079 sysctl_lowmem_reserve_ratio[idx] = 1;
4080
4081 lower_zone = pgdat->node_zones + idx;
4082 lower_zone->lowmem_reserve[j] = present_pages /
4083 sysctl_lowmem_reserve_ratio[idx];
4084 present_pages += lower_zone->present_pages;
4085 }
4086 }
4087 }
4088
4089
4090 calculate_totalreserve_pages();
4091}
4092
4093
/**
 * setup_per_zone_pages_min - called when min_free_kbytes changes.
 *
 * Ensures that the pages_{min,low,high} values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
4099void setup_per_zone_pages_min(void)
4100{
4101 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4102 unsigned long lowmem_pages = 0;
4103 struct zone *zone;
4104 unsigned long flags;
4105
4106
4107 for_each_zone(zone) {
4108 if (!is_highmem(zone))
4109 lowmem_pages += zone->present_pages;
4110 }
4111
4112 for_each_zone(zone) {
4113 u64 tmp;
4114
4115 spin_lock_irqsave(&zone->lru_lock, flags);
4116 tmp = (u64)pages_min * zone->present_pages;
4117 do_div(tmp, lowmem_pages);
4118 if (is_highmem(zone)) {
/*
 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
 * need highmem pages, so cap pages_min to a small
 * value here.
 *
 * The (pages_high - pages_low) and (pages_low - pages_min)
 * deltas control asynchronous page reclaim, and so should
 * not be capped for highmem.
 */
4128 int min_pages;
4129
4130 min_pages = zone->present_pages / 1024;
4131 if (min_pages < SWAP_CLUSTER_MAX)
4132 min_pages = SWAP_CLUSTER_MAX;
4133 if (min_pages > 128)
4134 min_pages = 128;
4135 zone->pages_min = min_pages;
4136 } else {
/*
 * If it's a lowmem zone, reserve a number of pages
 * proportionate to the zone's size.
 */
4141 zone->pages_min = tmp;
4142 }
4143
4144 zone->pages_low = zone->pages_min + (tmp >> 2);
4145 zone->pages_high = zone->pages_min + (tmp >> 1);
4146 setup_zone_migrate_reserve(zone);
4147 spin_unlock_irqrestore(&zone->lru_lock, flags);
4148 }
4149
4150
4151 calculate_totalreserve_pages();
4152}
4153
4154
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
4178static int __init init_per_zone_pages_min(void)
4179{
4180 unsigned long lowmem_kbytes;
4181
4182 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4183
4184 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4185 if (min_free_kbytes < 128)
4186 min_free_kbytes = 128;
4187 if (min_free_kbytes > 65536)
4188 min_free_kbytes = 65536;
4189 setup_per_zone_pages_min();
4190 setup_per_zone_lowmem_reserve();
4191 return 0;
4192}
4193module_init(init_per_zone_pages_min)
4194
4195
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 * that we can call two helper functions whenever min_free_kbytes
 * changes.
 */
4200int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4201 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4202{
4203 proc_dointvec(table, write, file, buffer, length, ppos);
4204 if (write)
4205 setup_per_zone_pages_min();
4206 return 0;
4207}
4208
4209#ifdef CONFIG_NUMA
4210int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4211 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4212{
4213 struct zone *zone;
4214 int rc;
4215
4216 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4217 if (rc)
4218 return rc;
4219
4220 for_each_zone(zone)
4221 zone->min_unmapped_pages = (zone->present_pages *
4222 sysctl_min_unmapped_ratio) / 100;
4223 return 0;
4224}
4225
4226int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4227 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4228{
4229 struct zone *zone;
4230 int rc;
4231
4232 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4233 if (rc)
4234 return rc;
4235
4236 for_each_zone(zone)
4237 zone->min_slab_pages = (zone->present_pages *
4238 sysctl_min_slab_ratio) / 100;
4239 return 0;
4240}
4241#endif
4242
4243
/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 * whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the pages_min watermarks; the
 * lowmem reserve ratio can only make sense when it is increased.
 */
4252int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4253 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4254{
4255 proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4256 setup_per_zone_lowmem_reserve();
4257 return 0;
4258}
4259
4260
/*
 * percpu_pagelist_fraction - changes pcp->high for each zone on each cpu.
 * It is the fraction of total pages in each zone that a hot per-cpu
 * pagelist can hold before it gets flushed back to the buddy allocator.
 */
4266int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4267 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4268{
4269 struct zone *zone;
4270 unsigned int cpu;
4271 int ret;
4272
4273 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4274 if (!write || (ret == -EINVAL))
4275 return ret;
4276 for_each_zone(zone) {
4277 for_each_online_cpu(cpu) {
4278 unsigned long high;
4279 high = zone->present_pages / percpu_pagelist_fraction;
4280 setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4281 }
4282 }
4283 return 0;
4284}
4285
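/*
 * hashdist: when non-zero, the large system hash tables below are allocated
 * with vmalloc rather than linearly mapped pages so that, on NUMA, the
 * table can be spread across nodes. Tunable with the "hashdist=" boot
 * option on NUMA kernels.
 */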
4286int hashdist = HASHDIST_DEFAULT;
4287
4288#ifdef CONFIG_NUMA
4289static int __init set_hashdist(char *str)
4290{
4291 if (!str)
4292 return 0;
4293 hashdist = simple_strtoul(str, &str, 0);
4294 return 1;
4295}
4296__setup("hashdist=", set_hashdist);
4297#endif
4298
4299
/*
 * Allocate a large system hash table.
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
4305void *__init alloc_large_system_hash(const char *tablename,
4306 unsigned long bucketsize,
4307 unsigned long numentries,
4308 int scale,
4309 int flags,
4310 unsigned int *_hash_shift,
4311 unsigned int *_hash_mask,
4312 unsigned long limit)
4313{
4314 unsigned long long max = limit;
4315 unsigned long log2qty, size;
4316 void *table = NULL;
4317
/* allow the kernel cmdline to have a say */
4319 if (!numentries) {
4320
4321 numentries = nr_kernel_pages;
4322 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4323 numentries >>= 20 - PAGE_SHIFT;
4324 numentries <<= 20 - PAGE_SHIFT;
4325
4326
4327 if (scale > PAGE_SHIFT)
4328 numentries >>= (scale - PAGE_SHIFT);
4329 else
4330 numentries <<= (PAGE_SHIFT - scale);
4331
4332
4333 if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4334 numentries = PAGE_SIZE / bucketsize;
4335 }
4336 numentries = roundup_pow_of_two(numentries);
4337
/* limit allocation size to 1/16 of total memory by default */
4339 if (max == 0) {
4340 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4341 do_div(max, bucketsize);
4342 }
4343
4344 if (numentries > max)
4345 numentries = max;
4346
4347 log2qty = ilog2(numentries);
4348
4349 do {
4350 size = bucketsize << log2qty;
4351 if (flags & HASH_EARLY)
4352 table = alloc_bootmem(size);
4353 else if (hashdist)
4354 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4355 else {
4356 unsigned long order = get_order(size);
4357 table = (void*) __get_free_pages(GFP_ATOMIC, order);
/*
 * If bucketsize is not a power-of-two, the allocated order may
 * be larger than needed; split the high-order page and free the
 * unused tail pages beyond PAGE_ALIGN(size).
 */
4362 if (table) {
4363 unsigned long alloc_end = (unsigned long)table +
4364 (PAGE_SIZE << order);
4365 unsigned long used = (unsigned long)table +
4366 PAGE_ALIGN(size);
4367 split_page(virt_to_page(table), order);
4368 while (used < alloc_end) {
4369 free_page(used);
4370 used += PAGE_SIZE;
4371 }
4372 }
4373 }
4374 } while (!table && size > PAGE_SIZE && --log2qty);
4375
4376 if (!table)
4377 panic("Failed to allocate %s hash table\n", tablename);
4378
4379 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4380 tablename,
4381 (1U << log2qty),
4382 ilog2(size) - PAGE_SHIFT,
4383 size);
4384
4385 if (_hash_shift)
4386 *_hash_shift = log2qty;
4387 if (_hash_mask)
4388 *_hash_mask = (1 << log2qty) - 1;
4389
4390 return table;
4391}
4392
4393#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
4394struct page *pfn_to_page(unsigned long pfn)
4395{
4396 return __pfn_to_page(pfn);
4397}
4398unsigned long page_to_pfn(struct page *page)
4399{
4400 return __page_to_pfn(page);
4401}
4402EXPORT_SYMBOL(pfn_to_page);
4403EXPORT_SYMBOL(page_to_pfn);
4404#endif
4405
/* Return a pointer to the bitmap storing bits affecting a block of pages */
4407static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4408 unsigned long pfn)
4409{
4410#ifdef CONFIG_SPARSEMEM
4411 return __pfn_to_section(pfn)->pageblock_flags;
4412#else
4413 return zone->pageblock_flags;
4414#endif
4415}
4416
4417static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4418{
4419#ifdef CONFIG_SPARSEMEM
4420 pfn &= (PAGES_PER_SECTION-1);
4421 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4422#else
4423 pfn = pfn - zone->zone_start_pfn;
4424 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4425#endif
4426}
4427
4428
/**
 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest to retrieve
 * @end_bitidx: The last bit of interest
 * returns pageblock_bits flags
 */
4435unsigned long get_pageblock_flags_group(struct page *page,
4436 int start_bitidx, int end_bitidx)
4437{
4438 struct zone *zone;
4439 unsigned long *bitmap;
4440 unsigned long pfn, bitidx;
4441 unsigned long flags = 0;
4442 unsigned long value = 1;
4443
4444 zone = page_zone(page);
4445 pfn = page_to_pfn(page);
4446 bitmap = get_pageblock_bitmap(zone, pfn);
4447 bitidx = pfn_to_bitidx(zone, pfn);
4448
4449 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4450 if (test_bit(bitidx + start_bitidx, bitmap))
4451 flags |= value;
4452
4453 return flags;
4454}
4455
/**
 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @start_bitidx: The first bit of interest
 * @end_bitidx: The last bit of interest
 */
4463void set_pageblock_flags_group(struct page *page, unsigned long flags,
4464 int start_bitidx, int end_bitidx)
4465{
4466 struct zone *zone;
4467 unsigned long *bitmap;
4468 unsigned long pfn, bitidx;
4469 unsigned long value = 1;
4470
4471 zone = page_zone(page);
4472 pfn = page_to_pfn(page);
4473 bitmap = get_pageblock_bitmap(zone, pfn);
4474 bitidx = pfn_to_bitidx(zone, pfn);
4475 VM_BUG_ON(pfn < zone->zone_start_pfn);
4476 VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
4477
4478 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4479 if (flags & value)
4480 __set_bit(bitidx + start_bitidx, bitmap);
4481 else
4482 __clear_bit(bitidx + start_bitidx, bitmap);
4483}
4484
/*
 * Mark a pageblock MIGRATE_ISOLATE so that no further pages are allocated
 * from it, and move its currently free pages onto the isolate free list.
 * Used by page isolation and memory hot-remove.
 */
4491int set_migratetype_isolate(struct page *page)
4492{
4493 struct zone *zone;
4494 unsigned long flags;
4495 int ret = -EBUSY;
4496
4497 zone = page_zone(page);
4498 spin_lock_irqsave(&zone->lock, flags);
4499

/* Only MIGRATE_MOVABLE blocks can currently be isolated */
4502 if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
4503 goto out;
4504 set_pageblock_migratetype(page, MIGRATE_ISOLATE);
4505 move_freepages_block(zone, page, MIGRATE_ISOLATE);
4506 ret = 0;
4507out:
4508 spin_unlock_irqrestore(&zone->lock, flags);
4509 if (!ret)
4510 drain_all_pages();
4511 return ret;
4512}
4513
4514void unset_migratetype_isolate(struct page *page)
4515{
4516 struct zone *zone;
4517 unsigned long flags;
4518 zone = page_zone(page);
4519 spin_lock_irqsave(&zone->lock, flags);
4520 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
4521 goto out;
4522 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4523 move_freepages_block(zone, page, MIGRATE_MOVABLE);
4524out:
4525 spin_unlock_irqrestore(&zone->lock, flags);
4526}
4527
4528#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be isolated before calling this.
 */
4532void
4533__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
4534{
4535 struct page *page;
4536 struct zone *zone;
4537 int order, i;
4538 unsigned long pfn;
4539 unsigned long flags;

/* Find the first valid pfn in the range to locate the zone */
4541 for (pfn = start_pfn; pfn < end_pfn; pfn++)
4542 if (pfn_valid(pfn))
4543 break;
4544 if (pfn == end_pfn)
4545 return;
4546 zone = page_zone(pfn_to_page(pfn));
4547 spin_lock_irqsave(&zone->lock, flags);
4548 pfn = start_pfn;
4549 while (pfn < end_pfn) {
4550 if (!pfn_valid(pfn)) {
4551 pfn++;
4552 continue;
4553 }
4554 page = pfn_to_page(pfn);
4555 BUG_ON(page_count(page));
4556 BUG_ON(!PageBuddy(page));
4557 order = page_order(page);
4558#ifdef CONFIG_DEBUG_VM
4559 printk(KERN_INFO "remove from free list %lx %d %lx\n",
4560 pfn, 1 << order, end_pfn);
4561#endif
4562 list_del(&page->lru);
4563 rmv_page_order(page);
4564 zone->free_area[order].nr_free--;
4565 __mod_zone_page_state(zone, NR_FREE_PAGES,
4566 - (1UL << order));
4567 for (i = 0; i < (1 << order); i++)
4568 SetPageReserved((page+i));
4569 pfn += (1 << order);
4570 }
4571 spin_unlock_irqrestore(&zone->lock, flags);
4572}
4573#endif
4574