/*
 * linux/mm/page_alloc.c
 *
 * The zoned buddy allocator: free lists, per-CPU page caches and the
 * page allocation entry points live here.
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;
int percpu_pagelist_fraction;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * sysctl_lowmem_reserve_ratio controls how strongly lower zones are
 * defended against allocations that could have used a higher zone:
 * each lower zone keeps 1/ratio of the higher zones' capacity in
 * reserve.  256 means "defend lightly", 32 means "defend strongly".
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	256,
#endif
#ifdef CONFIG_ZONE_DMA32
	256,
#endif
#ifdef CONFIG_HIGHMEM
	32,
#endif
	32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	"DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	"DMA32",
#endif
	"Normal",
#ifdef CONFIG_HIGHMEM
	"HighMem",
#endif
	"Movable",
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct ranges
   * of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() are merged where possible, so
   * the number of calls is related to the number of nodes and holes.
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
  static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif
  static unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
EXPORT_SYMBOL(nr_node_ids);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{
	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	printk(KERN_ALERT
		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
		page, (void *)page->flags, page_count(page),
		page_mapcount(page), page->mapping, page->index);

	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	__ClearPageBuddy(page);
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured
 * thusly:
 *
 * The first PAGE_SIZE page is called the "head page"; the remaining
 * pages are "tail pages".  Each tail page has its ->first_page pointing
 * at the head page, and the head page carries the destructor and the
 * allocation order.  Zero-order pages cannot be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageTail(p);
		p->first_page = page;
	}
}
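
/*
 * Illustration (not from the original source): a __GFP_COMP allocation
 * of order 2 yields four struct pages laid out as
 *
 *	page[0]:    PG_head set, compound_order() == 2,
 *	            destructor == free_compound_page
 *	page[1..3]: PG_tail set, ->first_page == &page[0]
 *
 * so code holding any subpage can recover the head page, and freeing the
 * head frees all four pages at once via free_compound_page().
 */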

#ifdef CONFIG_HUGETLBFS
void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		p->first_page = page;
	}
}
#endif

static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order) ||
	    unlikely(!PageHead(page))) {
		bad_page(page);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) || (p->first_page != page))) {
			bad_page(page);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use a KM_USER0 kmap_atomic slot, so it is
	 * a requirement that we cannot be in an interrupt context with
	 * __GFP_HIGHMEM set.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
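
/*
 * Worked example (illustrative, not from the original source): for the
 * order-1 block at index 8, the buddy is 8 ^ (1 << 1) = 10, and merging
 * the pair yields the order-2 parent at 8 & ~(1 << 1) = 8.  The block at
 * index 10 computes buddy 8 and parent 8 by the same math, so both
 * halves agree on the combined index.
 */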

/*
 * This function checks whether a page is free and is the buddy we can
 * coalesce a page with.  A page and its buddy can be merged if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * A free page is recognized by PageBuddy(); its order is recorded in
 * page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped tables
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At each level we keep a list of pages which are heads of contiguous
 * free runs of length (1 << order), marked PG_buddy, with the order
 * recorded in page_private(page).
 * If a block is freed and its buddy is also free, this triggers
 * coalescing into a block of larger size; the merge repeats until a
 * buddy is busy or the maximum order is reached.
 */
static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;
	int migratetype = get_pageblock_migratetype(page);

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));

	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;

		/* Our buddy is free, merge with it and move up one order. */
		list_del(&buddy->lru);
		zone->free_area[order].nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru,
		&zone->free_area[order].free_list[migratetype]);
	zone->free_area[order].nr_free++;
}
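
/*
 * Coalescing walk-through (illustrative, not from the original source):
 * freeing order-0 page 8 while page 9 is already free merges them into
 * an order-1 block at 8; if the order-1 block at 10 is also free, the
 * loop merges again into an order-2 block at 8, and so on until a buddy
 * is busy or order MAX_ORDER-1 is reached.
 */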

static inline int free_pages_check(struct page *page)
{
	free_page_mlock(page);
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
		bad_page(page);
		return 1;
	}
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;
	__free_one_page(page, zone, order);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int bad = 0;

	for (i = 0; i < (1 << order); ++i)
		bad += free_pages_check(page + i);
	if (bad)
		return;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
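
/*
 * Splitting walk-through (illustrative, not from the original source):
 * taking an order-3 block to satisfy an order-0 request, expand() is
 * called with low=0, high=3.  It frees the upper halves on the way
 * down: pages [4..7] go on the order-2 list, [2..3] on the order-1
 * list and [1] on the order-0 list, leaving page [0] for the caller.
 */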

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
		bad_page(page);
		return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}

/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE },
};
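
/*
 * Example (illustrative): an unmovable allocation whose own free lists
 * are empty tries MIGRATE_RECLAIMABLE blocks first, then
 * MIGRATE_MOVABLE, and only then dips into MIGRATE_RESERVE via
 * __rmqueue_fallback() below.
 */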

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_del(&page->lru);
		list_add(&page->lru,
			&zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

static int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}

/* Remove an element from the buddy allocator from the fallback list */
static struct page *__rmqueue_fallback(struct zone *zone, int order,
						int start_migratetype)
{
	struct free_area *area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				continue;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages
			 */
			if (unlikely(current_order >= (pageblock_order >> 1)) ||
					start_migratetype == MIGRATE_RECLAIMABLE) {
				unsigned long pages;
				pages = move_freepages_block(zone, page,
								start_migratetype);

				/* Claim the whole block if over half of it is free */
				if (pages >= (1 << (pageblock_order-1)))
					set_pageblock_migratetype(page,
								start_migratetype);

				migratetype = start_migratetype;
			}

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);
			__mod_zone_page_state(zone, NR_FREE_PAGES,
							-(1UL << order));

			if (current_order == pageblock_order)
				set_pageblock_migratetype(page,
							start_migratetype);

			expand(zone, page, order, current_order, area, migratetype);
			return page;
		}
	}

	/* Use MIGRATE_RESERVE rather than fail an allocation */
	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page))
		page = __rmqueue_fallback(zone, order, migratetype);

	return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order. The page is added to the caller's
		 * list and the list head then moves forward. From the
		 * caller's perspective, the linked list is ordered by page
		 * number in some conditions. This is useful for IO devices
		 * that can merge IO requests if the physical pages are
		 * ordered properly.
		 */
		list_add(&page->lru, list);
		set_page_private(page, migratetype);
		list = &page->lru;
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;

	local_irq_save(flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
	free_pages_bulk(zone, to_drain, &pcp->list, 0);
	pcp->count -= to_drain;
	local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		pset = zone_pcp(zone, cpu);

		pcp = &pset->pcp;
		local_irq_save(flags);
		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
		pcp->count = 0;
		local_irq_restore(flags);
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
	drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
	on_each_cpu(drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order, t;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

/*
 * Free a 0-order page
 */
static void free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
	}
	arch_free_page(page, 0);
	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp;
	local_irq_save(flags);
	__count_vm_event(PGFREE);
	if (cold)
		list_add_tail(&page->lru, &pcp->list);
	else
		list_add(&page->lru, &pcp->list);
	set_page_private(page, get_pageblock_migratetype(page));
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}

void free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}

void free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));
	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
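
/*
 * Usage sketch (illustrative, not from the original source): carve an
 * order-2 allocation into four independently freeable pages.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (page) {
 *		split_page(page, 2);
 *		__free_page(page + 3);	// pages 0..2 remain in use
 *	}
 */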

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;
	int migratetype = allocflags_to_migratetype(gfp_flags);

again:
	cpu = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp;
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count = rmqueue_bulk(zone, 0,
					pcp->batch, &pcp->list, migratetype);
			if (unlikely(!pcp->count))
				goto failed;
		}

		/* Find a page of the appropriate migrate type */
		if (cold) {
			list_for_each_entry_reverse(page, &pcp->list, lru)
				if (page_private(page) == migratetype)
					break;
		} else {
			list_for_each_entry(page, &pcp->list, lru)
				if (page_private(page) == migratetype)
					break;
		}

		/* Allocate more to the pcp list if necessary */
		if (unlikely(&page->lru == &pcp->list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, &pcp->list, migratetype);
			page = list_entry(pcp->list.next, struct page, lru);
		}

		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order, migratetype);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone);
	local_irq_restore(flags);
	put_cpu();

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
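
/*
 * Example (illustrative): a GFP_ATOMIC allocation ends up with
 * ALLOC_WMARK_MIN | ALLOC_HARDER | ALLOC_HIGH in
 * __alloc_pages_internal(), letting it dip well below the normal
 * watermarks, while an ordinary GFP_KERNEL allocation first tries
 * ALLOC_WMARK_LOW | ALLOC_CPUSET.
 */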

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct fail_page_alloc_attr {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;
	u32 min_order;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

	struct dentry *ignore_gfp_highmem_file;
	struct dentry *ignore_gfp_wait_file;
	struct dentry *min_order_file;

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return 0;
	if (gfp_mask & __GFP_NOFAIL)
		return 0;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return 0;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return 0;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;
	int err;

	err = init_fault_attr_dentries(&fail_page_alloc.attr,
				       "fail_page_alloc");
	if (err)
		return err;
	dir = fail_page_alloc.attr.dentries.dir;

	fail_page_alloc.ignore_gfp_wait_file =
		debugfs_create_bool("ignore-gfp-wait", mode, dir,
				      &fail_page_alloc.ignore_gfp_wait);

	fail_page_alloc.ignore_gfp_highmem_file =
		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				      &fail_page_alloc.ignore_gfp_highmem);
	fail_page_alloc.min_order_file =
		debugfs_create_u32("min-order", mode, dir,
				   &fail_page_alloc.min_order);

	if (!fail_page_alloc.ignore_gfp_wait_file ||
			!fail_page_alloc.ignore_gfp_highmem_file ||
			!fail_page_alloc.min_order_file) {
		err = -ENOMEM;
		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
		debugfs_remove(fail_page_alloc.min_order_file);
		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
	}

	return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
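
/*
 * Numeric example (illustrative, not from the original source): with
 * mark = 128, no ALLOC_HIGH/HARDER and an order-2 request against a
 * zone holding 200 free pages, the first test compares 200 - 4 + 1 =
 * 197 against 128 plus the lowmem reserve; the loop then strips the
 * order-0 and order-1 free pages from the count while halving min at
 * each step, so enough *higher-order* blocks must remain free for the
 * check to pass.
 */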

#ifdef CONFIG_NUMA
/*
 * The zonelist cache (zlc) remembers, for about one second, which zones
 * in a zonelist recently failed to satisfy an allocation (were "full")
 * and which nodes the current task is allowed to allocate from.
 * Scanners consult it to skip zones that are unlikely to help, instead
 * of re-examining every zone on every allocation attempt.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	nodemask_t *allowednodes;	/* zonelist_cache approximation */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return NULL;

	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}

	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
					&cpuset_current_mems_allowed :
					&node_states[N_HIGH_MEMORY];
	return allowednodes;
}

/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 *  1) Check that the zone isn't thought to be full (doesn't have its
 *     bit set in the zonelist_cache fullzones bitmap).
 *  2) Check that the zone's node (obtained from the zonelist_cache
 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if the zone is worth looking at further, or
 * else return false (zero) if it is not.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
						nodemask_t *allowednodes)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */
	int n;				/* node that zone *z is on */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return 1;

	i = z - zonelist->_zonerefs;
	n = zlc->z_to_n[i];

	/* This zone is worth trying if it is allowed but not full */
	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
 */
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return;

	i = z - zonelist->_zonerefs;

	set_bit(i, zlc->fullzones);
}

#else	/* CONFIG_NUMA */

static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	return NULL;
}

static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
				nodemask_t *allowednodes)
{
	return 1;
}

static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}
#endif	/* CONFIG_NUMA */

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
		struct zonelist *zonelist, int high_zoneidx, int alloc_flags)
{
	struct zoneref *z;
	struct page *page = NULL;
	int classzone_idx;
	struct zone *zone, *preferred_zone;
	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
	int zlc_active = 0;		/* set if using zonelist_cache */
	int did_zlc_setup = 0;		/* just call zlc_setup() one time */

	(void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
							&preferred_zone);
	if (!preferred_zone)
		return NULL;

	classzone_idx = zone_idx(preferred_zone);

zonelist_scan:
	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						high_zoneidx, nodemask) {
		if (NUMA_BUILD && zlc_active &&
			!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;
		if ((alloc_flags & ALLOC_CPUSET) &&
			!cpuset_zone_allowed_softwall(zone, gfp_mask))
				goto try_next_zone;

		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			if (alloc_flags & ALLOC_WMARK_MIN)
				mark = zone->pages_min;
			else if (alloc_flags & ALLOC_WMARK_LOW)
				mark = zone->pages_low;
			else
				mark = zone->pages_high;
			if (!zone_watermark_ok(zone, order, mark,
				    classzone_idx, alloc_flags)) {
				if (!zone_reclaim_mode ||
				    !zone_reclaim(zone, gfp_mask, order))
					goto this_zone_full;
			}
		}

		page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
		if (page)
			break;
this_zone_full:
		if (NUMA_BUILD)
			zlc_mark_zone_full(zonelist, z);
try_next_zone:
		if (NUMA_BUILD && !did_zlc_setup) {
			/* we do zlc_setup after the first zone is tried */
			allowednodes = zlc_setup(zonelist, alloc_flags);
			zlc_active = 1;
			did_zlc_setup = 1;
		}
	}

	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
		/* Disable zlc cache for second zonelist scan */
		zlc_active = 0;
		goto zonelist_scan;
	}
	return page;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *
__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
			struct zonelist *zonelist, nodemask_t *nodemask)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	struct zoneref *z;
	struct zone *zone;
	struct page *page;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;
	int do_retry;
	int alloc_flags;
	unsigned long did_some_progress;
	unsigned long pages_reclaimed = 0;

	lockdep_trace_alloc(gfp_mask);

	might_sleep_if(wait);

	if (should_fail_alloc_page(gfp_mask, order))
		return NULL;

restart:
	z = zonelist->_zonerefs;  /* the list of zones suitable for gfp_mask */

	if (unlikely(!z->zone)) {
		/*
		 * Happens if we have an empty zonelist as a result of
		 * GFP_THISNODE being used on a memoryless node
		 */
		return NULL;
	}

	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET);
	if (page)
		goto got_pg;

	/*
	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
	 * using a larger set of nodes after it has established that the
	 * allowed per node queues are empty and that nodes are
	 * over allocated.
	 */
	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
		goto nopage;

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		wakeup_kswapd(zone, order);

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags
	 * according to how we want to proceed.
	 *
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags = ALLOC_WMARK_MIN;
	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
		alloc_flags |= ALLOC_HARDER;
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;
	if (wait)
		alloc_flags |= ALLOC_CPUSET;

	/*
	 * Go through the zonelist again. Let __GFP_HIGH and allocations
	 * coming from realtime tasks go deeper into reserves.
	 *
	 * This is the last chance, in general, before the goto nopage.
	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
						high_zoneidx, alloc_flags);
	if (page)
		goto got_pg;

	/* This allocation should allow future memory freeing. */

rebalance:
	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
			&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
nofail_alloc:
			/* go through the zonelist yet again, ignoring mins */
			page = get_page_from_freelist(gfp_mask, nodemask, order,
				zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
			if (page)
				goto got_pg;
			if (gfp_mask & __GFP_NOFAIL) {
				congestion_wait(WRITE, HZ/50);
				goto nofail_alloc;
			}
		}
		goto nopage;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();

	/*
	 * The task's cpuset might have expanded its set of allowable nodes
	 */
	cpuset_update_task_memory_state();
	p->flags |= PF_MEMALLOC;

	lockdep_set_current_reclaim_state(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	did_some_progress = try_to_free_pages(zonelist, order,
						gfp_mask, nodemask);

	p->reclaim_state = NULL;
	lockdep_clear_current_reclaim_state();
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (order != 0)
		drain_all_pages();

	if (likely(did_some_progress)) {
		page = get_page_from_freelist(gfp_mask, nodemask, order,
					zonelist, high_zoneidx, alloc_flags);
		if (page)
			goto got_pg;
	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
		if (!try_set_zone_oom(zonelist, gfp_mask)) {
			schedule_timeout_uninterruptible(1);
			goto restart;
		}

		/*
		 * Go through the zonelist yet one more time, keep
		 * very high watermark here, this is only to catch
		 * a parallel oom killing, we must fail if we're still
		 * under heavy pressure.
		 */
		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
			order, zonelist, high_zoneidx,
			ALLOC_WMARK_HIGH|ALLOC_CPUSET);
		if (page) {
			clear_zonelist_oom(zonelist, gfp_mask);
			goto got_pg;
		}

		/* The OOM killer will not help higher order allocs so fail */
		if (order > PAGE_ALLOC_COSTLY_ORDER) {
			clear_zonelist_oom(zonelist, gfp_mask);
			goto nopage;
		}

		out_of_memory(zonelist, gfp_mask, order);
		clear_zonelist_oom(zonelist, gfp_mask);
		goto restart;
	}

	/*
	 * Don't let big-order allocations loop unless the caller explicitly
	 * requests that.  Wait for some write requests to complete then retry.
	 *
	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER always
	 * retries unless __GFP_NORETRY is set.  Above that, __GFP_REPEAT
	 * keeps retrying only until at least (1 << order) pages have been
	 * reclaimed, and __GFP_NOFAIL retries forever.
	 */
	pages_reclaimed += did_some_progress;
	do_retry = 0;
	if (!(gfp_mask & __GFP_NORETRY)) {
		if (order <= PAGE_ALLOC_COSTLY_ORDER) {
			do_retry = 1;
		} else {
			if (gfp_mask & __GFP_REPEAT &&
				pages_reclaimed < (1 << order))
					do_retry = 1;
		}
		if (gfp_mask & __GFP_NOFAIL)
			do_retry = 1;
	}
	if (do_retry) {
		congestion_wait(WRITE, HZ/50);
		goto rebalance;
	}

nopage:
	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
		printk(KERN_WARNING "%s: page allocation failure."
			" order:%d, mode:0x%x\n",
			p->comm, order, gfp_mask);
		dump_stack();
		show_mem();
	}
got_pg:
	return page;
}
EXPORT_SYMBOL(__alloc_pages_internal);

/*
 * Common helper functions.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}

EXPORT_SYMBOL(__get_free_pages);

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	struct page *page;

	/*
	 * get_zeroed_page() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
	if (page)
		return (unsigned long) page_address(page);
	return 0;
}

EXPORT_SYMBOL(get_zeroed_page);

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0)
		free_hot_cold_page(pvec->pages[i], pvec->cold);
}

void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_page(page);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);

/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous pages
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);
	if (addr) {
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		split_page(virt_to_page(addr), order);
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}

	return (void *)addr;
}
EXPORT_SYMBOL(alloc_pages_exact);
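
/*
 * Usage sketch (illustrative, not from the original source): request 5
 * pages' worth of memory.  get_order() rounds the allocation up to
 * order 3 (8 pages), split_page() makes the subpages independent, and
 * the trailing 3 pages are handed straight back to the allocator.
 *
 *	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
 *	...
 *	free_pages_exact(buf, 5 * PAGE_SIZE);
 */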

/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
void free_pages_exact(void *virt, size_t size)
{
	unsigned long addr = (unsigned long)virt;
	unsigned long end = addr + PAGE_ALIGN(size);

	while (addr < end) {
		free_page(addr);
		addr += PAGE_SIZE;
	}
}
EXPORT_SYMBOL(free_pages_exact);

static unsigned int nr_free_zone_pages(int offset)
{
	struct zoneref *z;
	struct zone *zone;

	/* Just pick one node, since fallback list is circular */
	unsigned int sum = 0;

	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

	for_each_zone_zonelist(zone, z, zonelist, offset) {
		unsigned long size = zone->present_pages;
		unsigned long high = zone->pages_high;
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/*
 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 */
unsigned int nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);

/*
 * Amount of free RAM allocatable within all zones
 */
unsigned int nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
}

static inline void show_node(struct zone *zone)
{
	if (NUMA_BUILD)
		printk("Node %d ", zone_to_nid(zone));
}

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = global_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	val->totalram = pgdat->node_present_pages;
	val->freeram = node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
			NR_FREE_PAGES);
#else
	val->totalhigh = 0;
	val->freehigh = 0;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */
void show_free_areas(void)
{
	int cpu;
	struct zone *zone;

	for_each_populated_zone(zone) {
		show_node(zone);
		printk("%s per-cpu:\n", zone->name);

		for_each_online_cpu(cpu) {
			struct per_cpu_pageset *pageset;

			pageset = zone_pcp(zone, cpu);

			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
			       cpu, pageset->pcp.high,
			       pageset->pcp.batch, pageset->pcp.count);
		}
	}

	printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
		" inactive_file:%lu"
#ifdef CONFIG_UNEVICTABLE_LRU
		" unevictable:%lu"
#endif
		" dirty:%lu writeback:%lu unstable:%lu\n"
		" free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
		global_page_state(NR_ACTIVE_ANON),
		global_page_state(NR_ACTIVE_FILE),
		global_page_state(NR_INACTIVE_ANON),
		global_page_state(NR_INACTIVE_FILE),
#ifdef CONFIG_UNEVICTABLE_LRU
		global_page_state(NR_UNEVICTABLE),
#endif
		global_page_state(NR_FILE_DIRTY),
		global_page_state(NR_WRITEBACK),
		global_page_state(NR_UNSTABLE_NFS),
		global_page_state(NR_FREE_PAGES),
		global_page_state(NR_SLAB_RECLAIMABLE) +
			global_page_state(NR_SLAB_UNRECLAIMABLE),
		global_page_state(NR_FILE_MAPPED),
		global_page_state(NR_PAGETABLE),
		global_page_state(NR_BOUNCE));

	for_each_populated_zone(zone) {
		int i;

		show_node(zone);
		printk("%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
#ifdef CONFIG_UNEVICTABLE_LRU
			" unevictable:%lukB"
#endif
			" present:%lukB"
			" pages_scanned:%lu"
			" all_unreclaimable? %s"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(zone->pages_min),
			K(zone->pages_low),
			K(zone->pages_high),
			K(zone_page_state(zone, NR_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_INACTIVE_FILE)),
#ifdef CONFIG_UNEVICTABLE_LRU
			K(zone_page_state(zone, NR_UNEVICTABLE)),
#endif
			K(zone->present_pages),
			zone->pages_scanned,
			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
			);
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(" %lu", zone->lowmem_reserve[i]);
		printk("\n");
	}

	for_each_populated_zone(zone) {
		unsigned long nr[MAX_ORDER], flags, order, total = 0;

		show_node(zone);
		printk("%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			nr[order] = zone->free_area[order].nr_free;
			total += nr[order] << order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++)
			printk("%lu*%lukB ", nr[order], K(1UL) << order);
		printk("= %lukB\n", K(total));
	}

	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}

static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
	zoneref->zone = zone;
	zoneref->zone_idx = zone_idx(zone);
}

/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */
static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
				int nr_zones, enum zone_type zone_type)
{
	struct zone *zone;

	BUG_ON(zone_type >= MAX_NR_ZONES);
	zone_type++;

	do {
		zone_type--;
		zone = pgdat->node_zones + zone_type;
		if (populated_zone(zone)) {
			zoneref_set_zone(zone,
				&zonelist->_zonerefs[nr_zones++]);
			check_highest_zone(zone_type);
		}

	} while (zone_type);
	return nr_zones;
}

/*
 *  zonelist_order:
 *  0 = automatic detection of better ordering.
 *  1 = order by ([node] distance, -zonetype)
 *  2 = order by (-zonetype, [node] distance)
 *
 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
 *  the same zonelist. So only NUMA can configure this param.
 */
#define ZONELIST_ORDER_DEFAULT  0
#define ZONELIST_ORDER_NODE     1
#define ZONELIST_ORDER_ZONE     2

/*
 * zonelist order in the kernel.
 * set_zonelist_order() will set this to NODE or ZONE.
 */
static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};

#ifdef CONFIG_NUMA
/* The value user specified ....changed by config */
static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
/* string for sysctl */
#define NUMA_ZONELIST_ORDER_LEN	16
char numa_zonelist_order[16] = "default";

/*
 * interface for configuring zonelist ordering.
 * command line option "numa_zonelist_order"
 *	= "[dD]efault	- default, automatic configuration.
 *	= "[nN]ode	- order by node locality, then by zone within node
 *	= "[zZ]one	- order by zone, then by locality within zone
 */
static int __parse_numa_zonelist_order(char *s)
{
	if (*s == 'd' || *s == 'D') {
		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
	} else if (*s == 'n' || *s == 'N') {
		user_zonelist_order = ZONELIST_ORDER_NODE;
	} else if (*s == 'z' || *s == 'Z') {
		user_zonelist_order = ZONELIST_ORDER_ZONE;
	} else {
		printk(KERN_WARNING
			"Ignoring invalid numa_zonelist_order value: "
			"%s\n", s);
		return -EINVAL;
	}
	return 0;
}

static __init int setup_numa_zonelist_order(char *s)
{
	if (s)
		return __parse_numa_zonelist_order(s);
	return 0;
}
early_param("numa_zonelist_order", setup_numa_zonelist_order);

/*
 * sysctl handler for numa_zonelist_order
 */
int numa_zonelist_order_handler(ctl_table *table, int write,
		struct file *file, void __user *buffer, size_t *length,
		loff_t *ppos)
{
	char saved_string[NUMA_ZONELIST_ORDER_LEN];
	int ret;

	if (write)
		strncpy(saved_string, (char*)table->data,
			NUMA_ZONELIST_ORDER_LEN);
	ret = proc_dostring(table, write, file, buffer, length, ppos);
	if (ret)
		return ret;
	if (write) {
		int oldval = user_zonelist_order;
		if (__parse_numa_zonelist_order((char*)table->data)) {
			/*
			 * bogus value.  restore saved string
			 */
			strncpy((char*)table->data, saved_string,
				NUMA_ZONELIST_ORDER_LEN);
			user_zonelist_order = oldval;
		} else if (oldval != user_zonelist_order)
			build_all_zonelists();
	}
	return 0;
}

#define MAX_NODE_LOAD (num_online_nodes())
static int node_load[MAX_NUMNODES];

/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list.  The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 * It returns -1 if no node is found.
 */
static int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;
	const struct cpumask *tmp = cpumask_of_node(0);

	/* Use the local node if we haven't already */
	if (!node_isset(node, *used_node_mask)) {
		node_set(node, *used_node_mask);
		return node;
	}

	for_each_node_state(n, N_HIGH_MEMORY) {

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Penalize nodes under us ("prefer the next node") */
		val += (n < node);

		/* Give preference to headless and unused nodes */
		tmp = cpumask_of_node(n);
		if (!cpumask_empty(tmp))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}

/*
 * Build zonelists ordered by node and zones within node.
 * This results in maximum locality--normal zone overflows into local
 * DMA zone, if any--but risks exhausting DMA zone.
 */
static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
{
	int j;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[0];
	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
		;
	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

/*
 * Build gfp_thisnode zonelists
 */
static void build_thisnode_zonelists(pg_data_t *pgdat)
{
	int j;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[1];
	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

/*
 * Build zonelists ordered by zone and nodes within zones.
 * This results in conserving DMA zone[s] until all Normal memory is
 * exhausted, but results in overflowing to remote node while memory
 * may still exist in local DMA zone.
 */
static int node_order[MAX_NUMNODES];

static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
{
	int pos, j, node;
	int zone_type;		/* needs to be signed */
	struct zone *z;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[0];
	pos = 0;
	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
		for (j = 0; j < nr_nodes; j++) {
			node = node_order[j];
			z = &NODE_DATA(node)->node_zones[zone_type];
			if (populated_zone(z)) {
				zoneref_set_zone(z,
					&zonelist->_zonerefs[pos++]);
				check_highest_zone(zone_type);
			}
		}
	}
	zonelist->_zonerefs[pos].zone = NULL;
	zonelist->_zonerefs[pos].zone_idx = 0;
}

static int default_zonelist_order(void)
{
	int nid, zone_type;
	unsigned long low_kmem_size, total_size;
	struct zone *z;
	int average_size;
	/*
	 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
	 * If they are really small and used heavily, the system can fall
	 * into OOM very easily.
	 * This function detects ZONE_DMA/DMA32 size and configures zone
	 * order.
	 */
	/* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
	low_kmem_size = 0;
	total_size = 0;
	for_each_online_node(nid) {
		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
			z = &NODE_DATA(nid)->node_zones[zone_type];
			if (populated_zone(z)) {
				if (zone_type < ZONE_NORMAL)
					low_kmem_size += z->present_pages;
				total_size += z->present_pages;
			}
		}
	}
	if (!low_kmem_size ||			/* we don't have ZONE_DMA */
	    low_kmem_size > total_size/2)	/* DMA/DMA32 is big */
		return ZONELIST_ORDER_NODE;
	/*
	 * Look into each node's config.
	 * If there is a node whose DMA/DMA32 memory is very big relative
	 * to its local memory, NODE_ORDER may be suitable.
	 */
	average_size = total_size /
				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
	for_each_online_node(nid) {
		low_kmem_size = 0;
		total_size = 0;
		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
			z = &NODE_DATA(nid)->node_zones[zone_type];
			if (populated_zone(z)) {
				if (zone_type < ZONE_NORMAL)
					low_kmem_size += z->present_pages;
				total_size += z->present_pages;
			}
		}
		if (low_kmem_size &&
		    total_size > average_size &&	/* ignore small node */
		    low_kmem_size > total_size * 70/100)
			return ZONELIST_ORDER_NODE;
	}
	return ZONELIST_ORDER_ZONE;
}

static void set_zonelist_order(void)
{
	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
		current_zonelist_order = default_zonelist_order();
	else
		current_zonelist_order = user_zonelist_order;
}

static void build_zonelists(pg_data_t *pgdat)
{
	int j, node, load;
	enum zone_type i;
	nodemask_t used_mask;
	int local_node, prev_node;
	struct zonelist *zonelist;
	int order = current_zonelist_order;

	/* initialize zonelists */
	for (i = 0; i < MAX_ZONELISTS; i++) {
		zonelist = pgdat->node_zonelists + i;
		zonelist->_zonerefs[0].zone = NULL;
		zonelist->_zonerefs[0].zone_idx = 0;
	}

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	load = num_online_nodes();
	prev_node = local_node;
	nodes_clear(used_mask);

	memset(node_load, 0, sizeof(node_load));
	memset(node_order, 0, sizeof(node_order));
	j = 0;

	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		int distance = node_distance(local_node, node);

		/*
		 * If another node is sufficiently far away then it is better
		 * to reclaim pages in a zone before going off node.
		 */
		if (distance > RECLAIM_DISTANCE)
			zone_reclaim_mode = 1;

		/*
		 * We don't want to pressure a particular node.
		 * So adding penalty to the first node in same
		 * distance group to make it round-robin.
		 */
		if (distance != node_distance(local_node, prev_node))
			node_load[node] = load;

		prev_node = node;
		load--;
		if (order == ZONELIST_ORDER_NODE)
			build_zonelists_in_node_order(pgdat, node);
		else
			node_order[j++] = node;	/* remember order */
	}

	if (order == ZONELIST_ORDER_ZONE) {
		/* calculate node order -- i.e., DMA last! */
		build_zonelists_in_zone_order(pgdat, j);
	}

	build_thisnode_zonelists(pgdat);
}

/* Construct the zonelist performance cache - see further mmzone.h */
static void build_zonelist_cache(pg_data_t *pgdat)
{
	struct zonelist *zonelist;
	struct zonelist_cache *zlc;
	struct zoneref *z;

	zonelist = &pgdat->node_zonelists[0];
	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
	for (z = zonelist->_zonerefs; z->zone; z++)
		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
}

#else	/* CONFIG_NUMA */

static void set_zonelist_order(void)
{
	current_zonelist_order = ZONELIST_ORDER_ZONE;
}

static void build_zonelists(pg_data_t *pgdat)
{
	int node, local_node;
	enum zone_type j;
	struct zonelist *zonelist;

	local_node = pgdat->node_id;

	zonelist = &pgdat->node_zonelists[0];
	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);

	/*
	 * Now we build the zonelist so that it contains the zones
	 * of all the other nodes.
	 * We don't want to pressure a particular node, so when
	 * building the zones for node N, we make sure that the
	 * zones coming right after the local ones are those from
	 * node N+1 (modulo N)
	 */
	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
		if (!node_online(node))
			continue;
		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	}
	for (node = 0; node < local_node; node++) {
		if (!node_online(node))
			continue;
		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	}

	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
static void build_zonelist_cache(pg_data_t *pgdat)
{
	pgdat->node_zonelists[0].zlcache_ptr = NULL;
}

#endif	/* CONFIG_NUMA */

/* return value is just for stop_machine() */
static int __build_all_zonelists(void *dummy)
{
	int nid;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);

		build_zonelists(pgdat);
		build_zonelist_cache(pgdat);
	}
	return 0;
}

void build_all_zonelists(void)
{
	set_zonelist_order();

	if (system_state == SYSTEM_BOOTING) {
		__build_all_zonelists(NULL);
		mminit_verify_zonelist();
		cpuset_init_current_mems_allowed();
	} else {
		/* we have to stop all cpus to guarantee there is no user
		   of zonelist */
		stop_machine(__build_all_zonelists, NULL, NULL);
		/* cpuset refresh routine should be here */
	}
	vm_total_pages = nr_free_pagecache_pages();
	/*
	 * Disable grouping by mobility if the number of pages in the
	 * system is too low to allow the mechanism to work. It would be
	 * more accurate, but expensive to check per-zone. This check is
	 * made on memory-hotadd so a system can start with mobility
	 * disabled and enable it later
	 */
	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
		page_group_by_mobility_disabled = 1;
	else
		page_group_by_mobility_disabled = 0;

	printk("Built %i zonelists in %s order, mobility grouping %s. "
		"Total pages: %ld\n",
			num_online_nodes(),
			zonelist_order_name[current_zonelist_order],
			page_group_by_mobility_disabled ? "off" : "on",
			vm_total_pages);
#ifdef CONFIG_NUMA
	printk("Policy zone: %s\n", zone_names[policy_zone]);
#endif
}

/*
 * Helper functions to size the waitqueue hash table.
 * Essentially these want to choose hash table sizes sufficiently
 * large so that collisions trying to wait on pages are rare.
 * But in fact, the number of active page waitqueues on typical
 * systems is ridiculously low, less than 200. So this is even
 * conservative, even though it seems large.
 *
 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
 * waitqueues, i.e. the size of the waitq table given the number of pages.
 */
#define PAGES_PER_WAITQUEUE	256

#ifndef CONFIG_MEMORY_HOTPLUG
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;

	while (size < pages)
		size <<= 1;

	/*
	 * Once we have dozens or even hundreds of threads sleeping
	 * on IO we've got bigger problems than wait queue collision.
	 * Limit the size of the wait table to a reasonable size.
	 */
	size = min(size, 4096UL);

	return max(size, 4UL);
}
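
/*
 * Example (illustrative): a zone of 1M pages gives 1M / 256 = 4096
 * target entries, which is already a power of two and exactly the
 * 4096-entry cap; a tiny 100-page zone rounds down to the 4-entry floor.
 */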
#else
/*
 * A zone's size might be changed by hot-add, so it is not possible to
 * determine a suitable size for its wait_table in advance.  So we use
 * the maximum size now: 4096 entries (64KB-96KB of wait_queue_head_t,
 * depending on the architecture and preemption config).  By the formula
 * above, that many entries would otherwise only be reached by zones of
 * roughly a million pages or more.
 */
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	return 4096UL;
}
#endif

/*
 * This is an integer logarithm so that shifts can be used later
 * to extract the more random high bits from the multiplicative
 * hash function before the remainder is taken.
 */
static inline unsigned long wait_table_bits(unsigned long size)
{
	return ffz(~size);
}
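
/*
 * Example (illustrative): for a power-of-two table size such as 4096,
 * ffz(~4096) finds the first zero bit of ~4096, which is bit 12, so
 * wait_table_bits(4096) == 12 == log2(4096).
 */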

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

/*
 * Mark a number of pageblocks as MIGRATE_RESERVE. The number of blocks
 * reserved is based on zone->pages_min; the memory within the reserve
 * will tend to store contiguous free pages.
 */
static void setup_zone_migrate_reserve(struct zone *zone)
{
	unsigned long start_pfn, pfn, end_pfn;
	struct page *page;
	unsigned long reserve, block_migratetype;

	/* Get the start pfn, end pfn and the number of blocks to reserve */
	start_pfn = zone->zone_start_pfn;
	end_pfn = start_pfn + zone->spanned_pages;
	reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
							pageblock_order;

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		/* Watch out for overlapping nodes */
		if (page_to_nid(page) != zone_to_nid(zone))
			continue;

		/* Blocks with reserved pages will never free, skip them. */
		if (PageReserved(page))
			continue;

		block_migratetype = get_pageblock_migratetype(page);

		/* If this block is reserved, account for it */
		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
			reserve--;
			continue;
		}

		/* Suitable for reserving if this block is movable */
		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
			set_pageblock_migratetype(page, MIGRATE_RESERVE);
			move_freepages_block(zone, page, MIGRATE_RESERVE);
			reserve--;
			continue;
		}

		/*
		 * If the reserve is met and this is a previous reserved block,
		 * take it back
		 */
		if (block_migratetype == MIGRATE_RESERVE) {
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
			move_freepages_block(zone, page, MIGRATE_MOVABLE);
		}
	}
}
2604
2605
2606
2607
2608
2609
2610void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2611 unsigned long start_pfn, enum memmap_context context)
2612{
2613 struct page *page;
2614 unsigned long end_pfn = start_pfn + size;
2615 unsigned long pfn;
2616 struct zone *z;
2617
2618 if (highest_memmap_pfn < end_pfn - 1)
2619 highest_memmap_pfn = end_pfn - 1;
2620
2621 z = &NODE_DATA(nid)->node_zones[zone];
2622 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2623
2624
2625
2626
2627
2628 if (context == MEMMAP_EARLY) {
2629 if (!early_pfn_valid(pfn))
2630 continue;
2631 if (!early_pfn_in_nid(pfn, nid))
2632 continue;
2633 }
2634 page = pfn_to_page(pfn);
2635 set_page_links(page, zone, nid, pfn);
2636 mminit_verify_page_links(page, zone, nid, pfn);
2637 init_page_count(page);
2638 reset_page_mapcount(page);
2639 SetPageReserved(page);
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654 if ((z->zone_start_pfn <= pfn)
2655 && (pfn < z->zone_start_pfn + z->spanned_pages)
2656 && !(pfn & (pageblock_nr_pages - 1)))
2657 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2658
2659 INIT_LIST_HEAD(&page->lru);
2660#ifdef WANT_PAGE_VIRTUAL
		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
2662 if (!is_highmem_idx(zone))
2663 set_page_address(page, __va(pfn << PAGE_SHIFT));
2664#endif
2665 }
2666}
2667
2668static void __meminit zone_init_free_lists(struct zone *zone)
2669{
2670 int order, t;
2671 for_each_migratetype_order(order, t) {
2672 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2673 zone->free_area[order].nr_free = 0;
2674 }
2675}
2676
2677#ifndef __HAVE_ARCH_MEMMAP_INIT
2678#define memmap_init(size, nid, zone, start_pfn) \
2679 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
2680#endif
2681
2682static int zone_batchsize(struct zone *zone)
2683{
2684#ifdef CONFIG_MMU
2685 int batch;

	/*
	 * The per-cpu-pages pools are set to around 1000th of the
	 * size of the zone.  But no more than 1/2 of a meg.
	 *
	 * OK, so we don't know how big the cache is.  So guess.
	 */
2693 batch = zone->present_pages / 1024;
2694 if (batch * PAGE_SIZE > 512 * 1024)
2695 batch = (512 * 1024) / PAGE_SIZE;
	batch /= 4;		/* We effectively *= 4 below */
2697 if (batch < 1)
2698 batch = 1;
2699
	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
	 */
2710 batch = rounddown_pow_of_two(batch + batch/2) - 1;
2711
2712 return batch;
2713
2714#else
	/* The deferral and batching of frees should be suppressed under NOMMU
	 * conditions.
	 *
	 * The problem is that NOMMU needs to be able to allocate large chunks
	 * of contiguous memory as there's no hardware page translation to
	 * assemble apparent contiguous memory from discontiguous pages.
	 *
	 * Queueing large contiguous runs of pages for batching, however,
	 * causes the pages to actually be freed in smaller chunks.  As there
	 * can be a significant delay between the individual batches being
	 * recycled, this leads to the once large chunks of space being
	 * fragmented and becoming unavailable for high-order allocations.
	 */
2728 return 0;
2729#endif
2730}
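
/*
 * Worked example (a sketch, not part of the original source): a 1GB zone
 * with 4KB pages has 262144 present pages; 262144 / 1024 = 256, which
 * exceeds the 512KB cap, so batch is first clamped to 128, becomes 32
 * after the /= 4, and rounddown_pow_of_two(32 + 16) - 1 yields 31.
 */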
2731
2732static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2733{
2734 struct per_cpu_pages *pcp;
2735
2736 memset(p, 0, sizeof(*p));
2737
2738 pcp = &p->pcp;
2739 pcp->count = 0;
2740 pcp->high = 6 * batch;
2741 pcp->batch = max(1UL, 1 * batch);
2742 INIT_LIST_HEAD(&pcp->list);
2743}
2744
/*
 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
 * to the value high for the pageset p.
 */
2750static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2751 unsigned long high)
2752{
2753 struct per_cpu_pages *pcp;
2754
2755 pcp = &p->pcp;
2756 pcp->high = high;
2757 pcp->batch = max(1UL, high/4);
2758 if ((high/4) > (PAGE_SHIFT * 8))
2759 pcp->batch = PAGE_SHIFT * 8;
2760}
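
/*
 * Illustrative only (not from the original source): with
 * percpu_pagelist_fraction == 8 on a zone of 262144 pages, high becomes
 * 32768 and high/4 exceeds PAGE_SHIFT * 8, so on a 4KB-page build
 * (PAGE_SHIFT == 12) the batch is clamped to 96.
 */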
2761
2762
2763#ifdef CONFIG_NUMA
/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * Some NUMA counter updates may also be caught by the boot pagesets.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
2781static struct per_cpu_pageset boot_pageset[NR_CPUS];
2782
/*
 * Dynamically allocate memory for the
 * per cpu pageset array in struct zone.
 */
2787static int __cpuinit process_zones(int cpu)
2788{
2789 struct zone *zone, *dzone;
2790 int node = cpu_to_node(cpu);
2791
2792 node_set_state(node, N_CPU);
2793
2794 for_each_populated_zone(zone) {
2795 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
2796 GFP_KERNEL, node);
2797 if (!zone_pcp(zone, cpu))
2798 goto bad;
2799
2800 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
2801
2802 if (percpu_pagelist_fraction)
2803 setup_pagelist_highmark(zone_pcp(zone, cpu),
2804 (zone->present_pages / percpu_pagelist_fraction));
2805 }
2806
2807 return 0;
2808bad:
2809 for_each_zone(dzone) {
2810 if (!populated_zone(dzone))
2811 continue;
2812 if (dzone == zone)
2813 break;
2814 kfree(zone_pcp(dzone, cpu));
2815 zone_pcp(dzone, cpu) = &boot_pageset[cpu];
2816 }
2817 return -ENOMEM;
2818}
2819
2820static inline void free_zone_pagesets(int cpu)
2821{
2822 struct zone *zone;
2823
2824 for_each_zone(zone) {
2825 struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
2826
2827
2828 if (pset != &boot_pageset[cpu])
2829 kfree(pset);
2830 zone_pcp(zone, cpu) = &boot_pageset[cpu];
2831 }
2832}
2833
2834static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
2835 unsigned long action,
2836 void *hcpu)
2837{
2838 int cpu = (long)hcpu;
2839 int ret = NOTIFY_OK;
2840
2841 switch (action) {
2842 case CPU_UP_PREPARE:
2843 case CPU_UP_PREPARE_FROZEN:
2844 if (process_zones(cpu))
2845 ret = NOTIFY_BAD;
2846 break;
2847 case CPU_UP_CANCELED:
2848 case CPU_UP_CANCELED_FROZEN:
2849 case CPU_DEAD:
2850 case CPU_DEAD_FROZEN:
2851 free_zone_pagesets(cpu);
2852 break;
2853 default:
2854 break;
2855 }
2856 return ret;
2857}
2858
2859static struct notifier_block __cpuinitdata pageset_notifier =
2860 { &pageset_cpuup_callback, NULL, 0 };
2861
2862void __init setup_per_cpu_pageset(void)
2863{
2864 int err;
2865
	/*
	 * Initialize per_cpu_pageset for the boot cpu.
	 * A cpuup callback will do this for every cpu
	 * as it comes online.
	 */
2870 err = process_zones(smp_processor_id());
2871 BUG_ON(err);
2872 register_cpu_notifier(&pageset_notifier);
2873}
2874
2875#endif
2876
2877static noinline __init_refok
2878int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
2879{
2880 int i;
2881 struct pglist_data *pgdat = zone->zone_pgdat;
2882 size_t alloc_size;
2883
	/*
	 * The per-page waitqueue mechanism uses hashed waitqueues
	 * per zone.
	 */
2888 zone->wait_table_hash_nr_entries =
2889 wait_table_hash_nr_entries(zone_size_pages);
2890 zone->wait_table_bits =
2891 wait_table_bits(zone->wait_table_hash_nr_entries);
2892 alloc_size = zone->wait_table_hash_nr_entries
2893 * sizeof(wait_queue_head_t);
2894
2895 if (!slab_is_available()) {
2896 zone->wait_table = (wait_queue_head_t *)
2897 alloc_bootmem_node(pgdat, alloc_size);
2898 } else {
		/*
		 * This case means that a zone whose size was 0 gets new memory
		 * via memory hot-add.
		 * But it may be the case that a new node was hot-added.  In
		 * this case vmalloc() will not be able to use this new node's
		 * memory - this wait_table must be initialized to use this new
		 * node itself as well.
		 * To use this new node's memory, further consideration will be
		 * necessary.
		 */
2909 zone->wait_table = vmalloc(alloc_size);
2910 }
2911 if (!zone->wait_table)
2912 return -ENOMEM;
2913
	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
2915 init_waitqueue_head(zone->wait_table + i);
2916
2917 return 0;
2918}
2919
2920static __meminit void zone_pcp_init(struct zone *zone)
2921{
2922 int cpu;
2923 unsigned long batch = zone_batchsize(zone);
2924
2925 for (cpu = 0; cpu < NR_CPUS; cpu++) {
2926#ifdef CONFIG_NUMA
		/* Early boot. Slab allocator not functional yet */
		zone_pcp(zone, cpu) = &boot_pageset[cpu];
		setup_pageset(&boot_pageset[cpu], 0);
#else
		setup_pageset(zone_pcp(zone, cpu), batch);
2932#endif
2933 }
2934 if (zone->present_pages)
2935 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
2936 zone->name, zone->present_pages, batch);
2937}
2938
2939__meminit int init_currently_empty_zone(struct zone *zone,
2940 unsigned long zone_start_pfn,
2941 unsigned long size,
2942 enum memmap_context context)
2943{
2944 struct pglist_data *pgdat = zone->zone_pgdat;
2945 int ret;
2946 ret = zone_wait_table_init(zone, size);
2947 if (ret)
2948 return ret;
2949 pgdat->nr_zones = zone_idx(zone) + 1;
2950
2951 zone->zone_start_pfn = zone_start_pfn;
2952
2953 mminit_dprintk(MMINIT_TRACE, "memmap_init",
2954 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
2955 pgdat->node_id,
2956 (unsigned long)zone_idx(zone),
2957 zone_start_pfn, (zone_start_pfn + size));
2958
2959 zone_init_free_lists(zone);
2960
2961 return 0;
2962}
2963
2964#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
 * Basic iterator support. Return the first range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns first region regardless of node
 */
2969static int __meminit first_active_region_index_in_nid(int nid)
2970{
2971 int i;
2972
2973 for (i = 0; i < nr_nodemap_entries; i++)
2974 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
2975 return i;
2976
2977 return -1;
2978}
2979
/*
 * Basic iterator support. Return the next active range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns next region regardless of node
 */
2984static int __meminit next_active_region_index_in_nid(int index, int nid)
2985{
2986 for (index = index + 1; index < nr_nodemap_entries; index++)
2987 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
2988 return index;
2989
2990 return -1;
2991}
2992
2993#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 * Architectures may implement their own version but if add_active_range()
 * was used and there are no special requirements, this is a convenient
 * alternative.
 */
3000int __meminit __early_pfn_to_nid(unsigned long pfn)
3001{
3002 int i;
3003
3004 for (i = 0; i < nr_nodemap_entries; i++) {
3005 unsigned long start_pfn = early_node_map[i].start_pfn;
3006 unsigned long end_pfn = early_node_map[i].end_pfn;
3007
3008 if (start_pfn <= pfn && pfn < end_pfn)
3009 return early_node_map[i].nid;
3010 }
3011
3012 return -1;
3013}
3014#endif
3015
3016int __meminit early_pfn_to_nid(unsigned long pfn)
3017{
3018 int nid;
3019
3020 nid = __early_pfn_to_nid(pfn);
3021 if (nid >= 0)
3022 return nid;
3023
3024 return 0;
3025}
3026
3027#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3028bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3029{
3030 int nid;
3031
3032 nid = __early_pfn_to_nid(pfn);
3033 if (nid >= 0 && nid != node)
3034 return false;
3035 return true;
3036}
3037#endif
3038
/* Basic iterator support to walk early_node_map[] */
3040#define for_each_active_range_index_in_nid(i, nid) \
3041 for (i = first_active_region_index_in_nid(nid); i != -1; \
3042 i = next_active_region_index_in_nid(i, nid))
3043
/**
 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling free_bootmem() manually.
 */
3053void __init free_bootmem_with_active_regions(int nid,
3054 unsigned long max_low_pfn)
3055{
3056 int i;
3057
3058 for_each_active_range_index_in_nid(i, nid) {
3059 unsigned long size_pages = 0;
3060 unsigned long end_pfn = early_node_map[i].end_pfn;
3061
3062 if (early_node_map[i].start_pfn >= max_low_pfn)
3063 continue;
3064
3065 if (end_pfn > max_low_pfn)
3066 end_pfn = max_low_pfn;
3067
3068 size_pages = end_pfn - early_node_map[i].start_pfn;
3069 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3070 PFN_PHYS(early_node_map[i].start_pfn),
3071 size_pages << PAGE_SHIFT);
3072 }
3073}
3074
3075void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3076{
3077 int i;
3078 int ret;
3079
3080 for_each_active_range_index_in_nid(i, nid) {
3081 ret = work_fn(early_node_map[i].start_pfn,
3082 early_node_map[i].end_pfn, data);
3083 if (ret)
3084 break;
3085 }
3086}
3087
/**
 * sparse_memory_present_with_active_regions - Call memory_present for each active range
 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling memory_present() manually.
 */
3095void __init sparse_memory_present_with_active_regions(int nid)
3096{
3097 int i;
3098
3099 for_each_active_range_index_in_nid(i, nid)
3100 memory_present(early_node_map[i].nid,
3101 early_node_map[i].start_pfn,
3102 early_node_map[i].end_pfn);
3103}
3104
/**
 * push_node_boundaries - Push node boundaries to at least the requested boundary
 * @nid: The nid of the node to push the boundary for
 * @start_pfn: The start pfn of the node
 * @end_pfn: The end pfn of the node
 *
 * In reserve-based hot-add, mem_map is allocated that is unused until hotadd
 * time. Specifically, on x86_64, SRAT will report ranges that can potentially
 * be hotplugged even though no physical memory exists. This function records
 * those boundaries so that the node is later considered to span at least
 * this range.
 */
3117#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
3118void __init push_node_boundaries(unsigned int nid,
3119 unsigned long start_pfn, unsigned long end_pfn)
3120{
3121 mminit_dprintk(MMINIT_TRACE, "zoneboundary",
3122 "Entering push_node_boundaries(%u, %lu, %lu)\n",
3123 nid, start_pfn, end_pfn);
3124
	/* Initialise the boundary for this node if necessary */
3126 if (node_boundary_end_pfn[nid] == 0)
3127 node_boundary_start_pfn[nid] = -1UL;
3128
	/* Update the boundaries */
3130 if (node_boundary_start_pfn[nid] > start_pfn)
3131 node_boundary_start_pfn[nid] = start_pfn;
3132 if (node_boundary_end_pfn[nid] < end_pfn)
3133 node_boundary_end_pfn[nid] = end_pfn;
3134}
3135
/* If necessary, push the node boundary out for reserve hotadd */
3137static void __meminit account_node_boundary(unsigned int nid,
3138 unsigned long *start_pfn, unsigned long *end_pfn)
3139{
3140 mminit_dprintk(MMINIT_TRACE, "zoneboundary",
3141 "Entering account_node_boundary(%u, %lu, %lu)\n",
3142 nid, *start_pfn, *end_pfn);
3143
	/* Return if boundary information has not been provided */
3145 if (node_boundary_end_pfn[nid] == 0)
3146 return;
3147
	/* Check the boundaries and update if necessary */
3149 if (node_boundary_start_pfn[nid] < *start_pfn)
3150 *start_pfn = node_boundary_start_pfn[nid];
3151 if (node_boundary_end_pfn[nid] > *end_pfn)
3152 *end_pfn = node_boundary_end_pfn[nid];
3153}
3154#else
3155void __init push_node_boundaries(unsigned int nid,
3156 unsigned long start_pfn, unsigned long end_pfn) {}
3157
3158static void __meminit account_node_boundary(unsigned int nid,
3159 unsigned long *start_pfn, unsigned long *end_pfn) {}
3160#endif
3161
/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by an arch calling add_active_range(). If called for a node
 * with no available memory, the start and end PFNs will be 0.
 */
3174void __meminit get_pfn_range_for_nid(unsigned int nid,
3175 unsigned long *start_pfn, unsigned long *end_pfn)
3176{
3177 int i;
3178 *start_pfn = -1UL;
3179 *end_pfn = 0;
3180
3181 for_each_active_range_index_in_nid(i, nid) {
3182 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3183 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3184 }
3185
3186 if (*start_pfn == -1UL)
3187 *start_pfn = 0;
3188
	/* Push the node boundaries out if requested */
3190 account_node_boundary(nid, start_pfn, end_pfn);
3191}
3192
/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonic
 * increasing memory addresses so that the "highest" populated zone is used
 */
3198static void __init find_usable_zone_for_movable(void)
3199{
3200 int zone_index;
3201 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3202 if (zone_index == ZONE_MOVABLE)
3203 continue;
3204
3205 if (arch_zone_highest_possible_pfn[zone_index] >
3206 arch_zone_lowest_possible_pfn[zone_index])
3207 break;
3208 }
3209
3210 VM_BUG_ON(zone_index == -1);
3211 movable_zone = zone_index;
3212}
3213
/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independent of architecture. Unlike the other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * for each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are ordered in monotonic increasing memory addresses
 * so that the "highest" populated zone is used.
 */
3224static void __meminit adjust_zone_range_for_zone_movable(int nid,
3225 unsigned long zone_type,
3226 unsigned long node_start_pfn,
3227 unsigned long node_end_pfn,
3228 unsigned long *zone_start_pfn,
3229 unsigned long *zone_end_pfn)
3230{
	/* Only adjust if ZONE_MOVABLE is on this node */
3232 if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
3234 if (zone_type == ZONE_MOVABLE) {
3235 *zone_start_pfn = zone_movable_pfn[nid];
3236 *zone_end_pfn = min(node_end_pfn,
3237 arch_zone_highest_possible_pfn[movable_zone]);
3238
		/* Adjust for ZONE_MOVABLE starting within this range */
3240 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3241 *zone_end_pfn > zone_movable_pfn[nid]) {
3242 *zone_end_pfn = zone_movable_pfn[nid];
3243
		/* Check if this whole range is within ZONE_MOVABLE */
3245 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
3246 *zone_start_pfn = *zone_end_pfn;
3247 }
3248}
3249
/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
3254static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3255 unsigned long zone_type,
3256 unsigned long *ignored)
3257{
3258 unsigned long node_start_pfn, node_end_pfn;
3259 unsigned long zone_start_pfn, zone_end_pfn;
3260
	/* Get the start and end of the node and zone */
3262 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3263 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3264 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3265 adjust_zone_range_for_zone_movable(nid, zone_type,
3266 node_start_pfn, node_end_pfn,
3267 &zone_start_pfn, &zone_end_pfn);
3268
	/* Check that this node has pages within the zone's required range */
3270 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3271 return 0;
3272
	/* Move the zone boundaries inside the node if necessary */
3274 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3275 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3276
	/* Return the spanned pages */
3278 return zone_end_pfn - zone_start_pfn;
3279}
3280
/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
3285static unsigned long __meminit __absent_pages_in_range(int nid,
3286 unsigned long range_start_pfn,
3287 unsigned long range_end_pfn)
3288{
3289 int i = 0;
3290 unsigned long prev_end_pfn = 0, hole_pages = 0;
3291 unsigned long start_pfn;
3292
	/* Find the end_pfn of the first active range of pfns in the node */
3294 i = first_active_region_index_in_nid(nid);
3295 if (i == -1)
3296 return 0;
3297
3298 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3299
	/* Account for ranges before physical memory on this node */
3301 if (early_node_map[i].start_pfn > range_start_pfn)
3302 hole_pages = prev_end_pfn - range_start_pfn;
3303
	/* Find all holes for the zone within the node */
3305 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3306
		/* No need to continue if prev_end_pfn is outside the zone */
3308 if (prev_end_pfn >= range_end_pfn)
3309 break;
3310
		/* Make sure the end of the zone is not within the hole */
3312 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3313 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3314
		/* Update the hole size count and move on */
3316 if (start_pfn > range_start_pfn) {
3317 BUG_ON(prev_end_pfn > start_pfn);
3318 hole_pages += start_pfn - prev_end_pfn;
3319 }
3320 prev_end_pfn = early_node_map[i].end_pfn;
3321 }
3322
	/* Account for ranges past physical memory on this node */
3324 if (range_end_pfn > prev_end_pfn)
3325 hole_pages += range_end_pfn -
3326 max(range_start_pfn, prev_end_pfn);
3327
3328 return hole_pages;
3329}
3330
/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
 */
3338unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3339 unsigned long end_pfn)
3340{
3341 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3342}
3343
/* Return the number of page frames in holes in a zone on a node */
3345static unsigned long __meminit zone_absent_pages_in_node(int nid,
3346 unsigned long zone_type,
3347 unsigned long *ignored)
3348{
3349 unsigned long node_start_pfn, node_end_pfn;
3350 unsigned long zone_start_pfn, zone_end_pfn;
3351
3352 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3353 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3354 node_start_pfn);
3355 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3356 node_end_pfn);
3357
3358 adjust_zone_range_for_zone_movable(nid, zone_type,
3359 node_start_pfn, node_end_pfn,
3360 &zone_start_pfn, &zone_end_pfn);
3361 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3362}
3363
3364#else
3365static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3366 unsigned long zone_type,
3367 unsigned long *zones_size)
3368{
3369 return zones_size[zone_type];
3370}
3371
3372static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3373 unsigned long zone_type,
3374 unsigned long *zholes_size)
3375{
3376 if (!zholes_size)
3377 return 0;
3378
3379 return zholes_size[zone_type];
3380}
3381
3382#endif
3383
3384static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3385 unsigned long *zones_size, unsigned long *zholes_size)
3386{
3387 unsigned long realtotalpages, totalpages = 0;
3388 enum zone_type i;
3389
3390 for (i = 0; i < MAX_NR_ZONES; i++)
3391 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3392 zones_size);
3393 pgdat->node_spanned_pages = totalpages;
3394
3395 realtotalpages = totalpages;
3396 for (i = 0; i < MAX_NR_ZONES; i++)
3397 realtotalpages -=
3398 zone_absent_pages_in_node(pgdat->node_id, i,
3399 zholes_size);
3400 pgdat->node_present_pages = realtotalpages;
3401 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3402 realtotalpages);
3403}
3404
3405#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long
 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
 * round what is now in bits to nearest long in bits, then return it in
 * bytes.
 */
3413static unsigned long __init usemap_size(unsigned long zonesize)
3414{
3415 unsigned long usemapsize;
3416
3417 usemapsize = roundup(zonesize, pageblock_nr_pages);
3418 usemapsize = usemapsize >> pageblock_order;
3419 usemapsize *= NR_PAGEBLOCK_BITS;
3420 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3421
3422 return usemapsize / 8;
3423}
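
/*
 * Illustrative example (not from the original source): with 4KB pages,
 * pageblock_order == 10 and NR_PAGEBLOCK_BITS == 4, a 1GB zone spans
 * 262144 pages == 256 pageblocks, needing 1024 bits == 128 bytes.
 */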
3424
3425static void __init setup_usemap(struct pglist_data *pgdat,
3426 struct zone *zone, unsigned long zonesize)
3427{
3428 unsigned long usemapsize = usemap_size(zonesize);
3429 zone->pageblock_flags = NULL;
3430 if (usemapsize)
3431 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3432}
3433#else
static inline void setup_usemap(struct pglist_data *pgdat,
				struct zone *zone, unsigned long zonesize) {}
3436#endif
3437
3438#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Return a sensible default order for the pageblock size. */
3441static inline int pageblock_default_order(void)
3442{
3443 if (HPAGE_SHIFT > PAGE_SHIFT)
3444 return HUGETLB_PAGE_ORDER;
3445
3446 return MAX_ORDER-1;
3447}
3448
/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3450static inline void __init set_pageblock_order(unsigned int order)
3451{
	/* Check that pageblock_nr_pages has not already been setup */
3453 if (pageblock_order)
3454 return;
3455
	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on IA64
	 */
3460 pageblock_order = order;
3461}
3462#else
/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * and pageblock_default_order() are unused as pageblock_order is set
 * at compile-time. See include/linux/pageblock-flags.h for the values of
 * pageblock_order based on the kernel config.
 */
3470static inline int pageblock_default_order(unsigned int order)
3471{
3472 return MAX_ORDER-1;
3473}
3474#define set_pageblock_order(x) do {} while (0)
3475
3476#endif
3477
/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
3484static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3485 unsigned long *zones_size, unsigned long *zholes_size)
3486{
3487 enum zone_type j;
3488 int nid = pgdat->node_id;
3489 unsigned long zone_start_pfn = pgdat->node_start_pfn;
3490 int ret;
3491
3492 pgdat_resize_init(pgdat);
3493 pgdat->nr_zones = 0;
3494 init_waitqueue_head(&pgdat->kswapd_wait);
3495 pgdat->kswapd_max_order = 0;
3496 pgdat_page_cgroup_init(pgdat);
3497
3498 for (j = 0; j < MAX_NR_ZONES; j++) {
3499 struct zone *zone = pgdat->node_zones + j;
3500 unsigned long size, realsize, memmap_pages;
3501 enum lru_list l;
3502
3503 size = zone_spanned_pages_in_node(nid, j, zones_size);
3504 realsize = size - zone_absent_pages_in_node(nid, j,
3505 zholes_size);
3506
		/*
		 * Adjust realsize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations
		 */
3512 memmap_pages =
3513 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3514 if (realsize >= memmap_pages) {
3515 realsize -= memmap_pages;
3516 if (memmap_pages)
3517 printk(KERN_DEBUG
3518 " %s zone: %lu pages used for memmap\n",
3519 zone_names[j], memmap_pages);
3520 } else
3521 printk(KERN_WARNING
3522 " %s zone: %lu pages exceeds realsize %lu\n",
3523 zone_names[j], memmap_pages, realsize);
3524
		/* Account for reserved pages */
3526 if (j == 0 && realsize > dma_reserve) {
3527 realsize -= dma_reserve;
3528 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
3529 zone_names[0], dma_reserve);
3530 }
3531
3532 if (!is_highmem_idx(j))
3533 nr_kernel_pages += realsize;
3534 nr_all_pages += realsize;
3535
3536 zone->spanned_pages = size;
3537 zone->present_pages = realsize;
3538#ifdef CONFIG_NUMA
3539 zone->node = nid;
3540 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3541 / 100;
3542 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3543#endif
3544 zone->name = zone_names[j];
3545 spin_lock_init(&zone->lock);
3546 spin_lock_init(&zone->lru_lock);
3547 zone_seqlock_init(zone);
3548 zone->zone_pgdat = pgdat;
3549
3550 zone->prev_priority = DEF_PRIORITY;
3551
3552 zone_pcp_init(zone);
3553 for_each_lru(l) {
3554 INIT_LIST_HEAD(&zone->lru[l].list);
3555 zone->lru[l].nr_scan = 0;
3556 }
3557 zone->reclaim_stat.recent_rotated[0] = 0;
3558 zone->reclaim_stat.recent_rotated[1] = 0;
3559 zone->reclaim_stat.recent_scanned[0] = 0;
3560 zone->reclaim_stat.recent_scanned[1] = 0;
3561 zap_zone_vm_stats(zone);
3562 zone->flags = 0;
3563 if (!size)
3564 continue;
3565
3566 set_pageblock_order(pageblock_default_order());
3567 setup_usemap(pgdat, zone, size);
3568 ret = init_currently_empty_zone(zone, zone_start_pfn,
3569 size, MEMMAP_EARLY);
3570 BUG_ON(ret);
3571 memmap_init(size, nid, j, zone_start_pfn);
3572 zone_start_pfn += size;
3573 }
3574}
3575
3576static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3577{
	/* Skip empty nodes */
3579 if (!pgdat->node_spanned_pages)
3580 return;
3581
3582#ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* ia64 gets its own node_mem_map, before this, without bootmem */
3584 if (!pgdat->node_mem_map) {
3585 unsigned long size, start, end;
3586 struct page *map;
3587
		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
3593 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3594 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3595 end = ALIGN(end, MAX_ORDER_NR_PAGES);
3596 size = (end - start) * sizeof(struct page);
3597 map = alloc_remap(pgdat->node_id, size);
3598 if (!map)
3599 map = alloc_bootmem_node(pgdat, size);
3600 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3601 }
3602#ifndef CONFIG_NEED_MULTIPLE_NODES
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
3606 if (pgdat == NODE_DATA(0)) {
3607 mem_map = NODE_DATA(0)->node_mem_map;
3608#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3609 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3610 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3611#endif
3612 }
3613#endif
3614#endif
3615}
3616
3617void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3618 unsigned long node_start_pfn, unsigned long *zholes_size)
3619{
3620 pg_data_t *pgdat = NODE_DATA(nid);
3621
3622 pgdat->node_id = nid;
3623 pgdat->node_start_pfn = node_start_pfn;
3624 calculate_node_totalpages(pgdat, zones_size, zholes_size);
3625
3626 alloc_node_mem_map(pgdat);
3627#ifdef CONFIG_FLAT_NODE_MEM_MAP
3628 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3629 nid, (unsigned long)pgdat,
3630 (unsigned long)pgdat->node_mem_map);
3631#endif
3632
3633 free_area_init_core(pgdat, zones_size, zholes_size);
3634}
3635
3636#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3637
3638#if MAX_NUMNODES > 1
/*
 * Figure out the number of possible node ids.
 */
3642static void __init setup_nr_node_ids(void)
3643{
3644 unsigned int node;
3645 unsigned int highest = 0;
3646
3647 for_each_node_mask(node, node_possible_map)
3648 highest = node;
3649 nr_node_ids = highest + 1;
3650}
3651#else
3652static inline void setup_nr_node_ids(void)
3653{
3654}
3655#endif
3656
/**
 * add_active_range - Register a range of PFNs backed by physical memory
 * @nid: The node ID the range resides on
 * @start_pfn: The start PFN of the available physical memory
 * @end_pfn: The end PFN of the available physical memory
 *
 * These ranges are stored in an early_node_map[] and later used by
 * free_area_init_nodes() to calculate zone sizes and holes. If the
 * range spans a memory hole, it is up to the architecture to ensure
 * the memory is not freed by the bootmem allocator. If possible
 * the range being registered will be merged with existing ranges.
 */
3669void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3670 unsigned long end_pfn)
3671{
3672 int i;
3673
3674 mminit_dprintk(MMINIT_TRACE, "memory_register",
3675 "Entering add_active_range(%d, %#lx, %#lx) "
3676 "%d entries of %d used\n",
3677 nid, start_pfn, end_pfn,
3678 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3679
3680 mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3681
	/* Merge with existing active regions if possible */
3683 for (i = 0; i < nr_nodemap_entries; i++) {
3684 if (early_node_map[i].nid != nid)
3685 continue;
3686
		/* Skip if an existing region covers this new one */
3688 if (start_pfn >= early_node_map[i].start_pfn &&
3689 end_pfn <= early_node_map[i].end_pfn)
3690 return;
3691
		/* Merge forward if suitable */
3693 if (start_pfn <= early_node_map[i].end_pfn &&
3694 end_pfn > early_node_map[i].end_pfn) {
3695 early_node_map[i].end_pfn = end_pfn;
3696 return;
3697 }
3698
		/* Merge backward if suitable */
3700 if (start_pfn < early_node_map[i].end_pfn &&
3701 end_pfn >= early_node_map[i].start_pfn) {
3702 early_node_map[i].start_pfn = start_pfn;
3703 return;
3704 }
3705 }
3706
	/* Check that early_node_map is large enough */
3708 if (i >= MAX_ACTIVE_REGIONS) {
3709 printk(KERN_CRIT "More than %d memory regions, truncating\n",
3710 MAX_ACTIVE_REGIONS);
3711 return;
3712 }
3713
3714 early_node_map[i].nid = nid;
3715 early_node_map[i].start_pfn = start_pfn;
3716 early_node_map[i].end_pfn = end_pfn;
3717 nr_nodemap_entries = i + 1;
3718}
3719
/**
 * remove_active_range - Shrink an existing registered range of PFNs
 * @nid: The node id the range is on that should be shrunk
 * @start_pfn: The new PFN of the range
 * @end_pfn: The new PFN of the range
 *
 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
 * The map is kept near the end physical page range that has already been
 * registered. This function allows an arch to shrink an existing registered
 * range.
 */
3731void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
3732 unsigned long end_pfn)
3733{
3734 int i, j;
3735 int removed = 0;
3736
3737 printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
3738 nid, start_pfn, end_pfn);
3739
	/* Find the old active region end and shrink */
3741 for_each_active_range_index_in_nid(i, nid) {
3742 if (early_node_map[i].start_pfn >= start_pfn &&
3743 early_node_map[i].end_pfn <= end_pfn) {
			/* clear it */
3745 early_node_map[i].start_pfn = 0;
3746 early_node_map[i].end_pfn = 0;
3747 removed = 1;
3748 continue;
3749 }
3750 if (early_node_map[i].start_pfn < start_pfn &&
3751 early_node_map[i].end_pfn > start_pfn) {
3752 unsigned long temp_end_pfn = early_node_map[i].end_pfn;
3753 early_node_map[i].end_pfn = start_pfn;
3754 if (temp_end_pfn > end_pfn)
3755 add_active_range(nid, end_pfn, temp_end_pfn);
3756 continue;
3757 }
3758 if (early_node_map[i].start_pfn >= start_pfn &&
3759 early_node_map[i].end_pfn > end_pfn &&
3760 early_node_map[i].start_pfn < end_pfn) {
3761 early_node_map[i].start_pfn = end_pfn;
3762 continue;
3763 }
3764 }
3765
3766 if (!removed)
3767 return;
3768
	/* Compact the map, removing the regions cleared above */
3770 for (i = nr_nodemap_entries - 1; i > 0; i--) {
3771 if (early_node_map[i].nid != nid)
3772 continue;
3773 if (early_node_map[i].end_pfn)
3774 continue;
3775
3776 for (j = i; j < nr_nodemap_entries - 1; j++)
3777 memcpy(&early_node_map[j], &early_node_map[j+1],
3778 sizeof(early_node_map[j]));
3779 j = nr_nodemap_entries - 1;
3780 memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
3781 nr_nodemap_entries--;
3782 }
3783}
3784
/**
 * remove_all_active_ranges - Remove all currently registered regions
 *
 * During discovery, it may be found that a table like SRAT is invalid
 * and an alternative discovery method must be used. This function removes
 * all currently registered regions.
 */
3792void __init remove_all_active_ranges(void)
3793{
3794 memset(early_node_map, 0, sizeof(early_node_map));
3795 nr_nodemap_entries = 0;
3796#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
3797 memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
3798 memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
3799#endif
3800}
3801
/* Compare two active node_active_regions */
3803static int __init cmp_node_active_region(const void *a, const void *b)
3804{
3805 struct node_active_region *arange = (struct node_active_region *)a;
3806 struct node_active_region *brange = (struct node_active_region *)b;
3807
	/* Done this way to avoid overflows */
3809 if (arange->start_pfn > brange->start_pfn)
3810 return 1;
3811 if (arange->start_pfn < brange->start_pfn)
3812 return -1;
3813
3814 return 0;
3815}
3816
/* sort the node_map by start_pfn */
3818static void __init sort_node_map(void)
3819{
3820 sort(early_node_map, (size_t)nr_nodemap_entries,
3821 sizeof(struct node_active_region),
3822 cmp_node_active_region, NULL);
3823}
3824
/* Find the lowest pfn for a node */
3826static unsigned long __init find_min_pfn_for_node(int nid)
3827{
3828 int i;
3829 unsigned long min_pfn = ULONG_MAX;
3830
	/* Regions in the early_node_map can be in any order */
3832 for_each_active_range_index_in_nid(i, nid)
3833 min_pfn = min(min_pfn, early_node_map[i].start_pfn);
3834
3835 if (min_pfn == ULONG_MAX) {
3836 printk(KERN_WARNING
3837 "Could not find start_pfn for node %d\n", nid);
3838 return 0;
3839 }
3840
3841 return min_pfn;
3842}
3843
/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * It returns the minimum PFN based on information provided via
 * add_active_range().
 */
3850unsigned long __init find_min_pfn_with_active_regions(void)
3851{
3852 return find_min_pfn_for_node(MAX_NUMNODES);
3853}
3854
/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_HIGH_MEMORY for calculating usable_nodes.
 */
3860static unsigned long __init early_calculate_totalpages(void)
3861{
3862 int i;
3863 unsigned long totalpages = 0;
3864
3865 for (i = 0; i < nr_nodemap_entries; i++) {
3866 unsigned long pages = early_node_map[i].end_pfn -
3867 early_node_map[i].start_pfn;
3868 totalpages += pages;
3869 if (pages)
3870 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3871 }
3872 return totalpages;
3873}
3874
/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others
 */
3881static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3882{
3883 int i, nid;
3884 unsigned long usable_startpfn;
3885 unsigned long kernelcore_node, kernelcore_remaining;
3886 unsigned long totalpages = early_calculate_totalpages();
3887 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
3888
	/*
	 * If movablecore was specified, calculate what size of
	 * kernelcore that corresponds so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
3897 if (required_movablecore) {
3898 unsigned long corepages;
3899
		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
3904 required_movablecore =
3905 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3906 corepages = totalpages - required_movablecore;
3907
3908 required_kernelcore = max(required_kernelcore, corepages);
3909 }
3910
	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
3912 if (!required_kernelcore)
3913 return;
3914
	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
3916 find_usable_zone_for_movable();
3917 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
3918
3919restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
3921 kernelcore_node = required_kernelcore / usable_nodes;
3922 for_each_node_state(nid, N_HIGH_MEMORY) {
		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel
		 */
3928 if (required_kernelcore < kernelcore_node)
3929 kernelcore_node = required_kernelcore / usable_nodes;
		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE
		 */
3936 kernelcore_remaining = kernelcore_node;
3937
		/* Go through each range of PFNs within this node */
3939 for_each_active_range_index_in_nid(i, nid) {
3940 unsigned long start_pfn, end_pfn;
3941 unsigned long size_pages;
3942
3943 start_pfn = max(early_node_map[i].start_pfn,
3944 zone_movable_pfn[nid]);
3945 end_pfn = early_node_map[i].end_pfn;
3946 if (start_pfn >= end_pfn)
3947 continue;
3948
			/* Account for what is only usable for kernelcore */
3950 if (start_pfn < usable_startpfn) {
3951 unsigned long kernel_pages;
3952 kernel_pages = min(end_pfn, usable_startpfn)
3953 - start_pfn;
3954
3955 kernelcore_remaining -= min(kernel_pages,
3956 kernelcore_remaining);
3957 required_kernelcore -= min(kernel_pages,
3958 required_kernelcore);
3959
				/* Continue if range is now fully accounted */
3961 if (end_pfn <= usable_startpfn) {
3962
					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here
					 */
3969 zone_movable_pfn[nid] = end_pfn;
3970 continue;
3971 }
3972 start_pfn = usable_startpfn;
3973 }
3974
			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
3980 size_pages = end_pfn - start_pfn;
3981 if (size_pages > kernelcore_remaining)
3982 size_pages = kernelcore_remaining;
3983 zone_movable_pfn[nid] = start_pfn + size_pages;
3984
			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
3990 required_kernelcore -= min(required_kernelcore,
3991 size_pages);
3992 kernelcore_remaining -= size_pages;
3993 if (!kernelcore_remaining)
3994 break;
3995 }
3996 }
3997
	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid]
	 * further along on the nodes that still have memory until kernelcore
	 * is satisfied
	 */
4004 usable_nodes--;
4005 if (usable_nodes && required_kernelcore > usable_nodes)
4006 goto restart;
4007
	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4009 for (nid = 0; nid < MAX_NUMNODES; nid++)
4010 zone_movable_pfn[nid] =
4011 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4012}
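
/*
 * Worked example (a sketch, not part of the original source): with
 * kernelcore=2G on two nodes of 2G each, kernelcore_node is 1G per node,
 * so zone_movable_pfn[] lands 1G into each node and roughly the upper
 * half of each node becomes ZONE_MOVABLE.
 */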
4013
/* Any regular memory on that node ? */
4015static void check_for_regular_memory(pg_data_t *pgdat)
4016{
4017#ifdef CONFIG_HIGHMEM
4018 enum zone_type zone_type;
4019
4020 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4021 struct zone *zone = &pgdat->node_zones[zone_type];
4022 if (zone->present_pages)
4023 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4024 }
4025#endif
4026}
4027
/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by add_active_range(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFN
 * between two adjacent zones match, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
4041void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4042{
4043 unsigned long nid;
4044 int i;
4045
	/* Sort early_node_map as initialisation assumes it is sorted */
4047 sort_node_map();
4048
	/* Record where the zone boundaries are */
4050 memset(arch_zone_lowest_possible_pfn, 0,
4051 sizeof(arch_zone_lowest_possible_pfn));
4052 memset(arch_zone_highest_possible_pfn, 0,
4053 sizeof(arch_zone_highest_possible_pfn));
4054 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4055 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4056 for (i = 1; i < MAX_NR_ZONES; i++) {
4057 if (i == ZONE_MOVABLE)
4058 continue;
4059 arch_zone_lowest_possible_pfn[i] =
4060 arch_zone_highest_possible_pfn[i-1];
4061 arch_zone_highest_possible_pfn[i] =
4062 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4063 }
4064 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4065 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4066
	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
4068 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4069 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4070
	/* Print out the zone ranges */
4072 printk("Zone PFN ranges:\n");
4073 for (i = 0; i < MAX_NR_ZONES; i++) {
4074 if (i == ZONE_MOVABLE)
4075 continue;
4076 printk(" %-8s %0#10lx -> %0#10lx\n",
4077 zone_names[i],
4078 arch_zone_lowest_possible_pfn[i],
4079 arch_zone_highest_possible_pfn[i]);
4080 }
4081
	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
4083 printk("Movable zone start PFN for each node\n");
4084 for (i = 0; i < MAX_NUMNODES; i++) {
4085 if (zone_movable_pfn[i])
4086 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
4087 }
4088
	/* Print out the early_node_map[] */
4090 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4091 for (i = 0; i < nr_nodemap_entries; i++)
4092 printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4093 early_node_map[i].start_pfn,
4094 early_node_map[i].end_pfn);
4095
	/* Initialise every node */
4097 mminit_verify_pageflags_layout();
4098 setup_nr_node_ids();
4099 for_each_online_node(nid) {
4100 pg_data_t *pgdat = NODE_DATA(nid);
4101 free_area_init_node(nid, NULL,
4102 find_min_pfn_for_node(nid), NULL);
4103
		/* Any memory on that node */
4105 if (pgdat->node_present_pages)
4106 node_set_state(nid, N_HIGH_MEMORY);
4107 check_for_regular_memory(pgdat);
4108 }
4109}
4110
4111static int __init cmdline_parse_core(char *p, unsigned long *core)
4112{
4113 unsigned long long coremem;
4114 if (!p)
4115 return -EINVAL;
4116
4117 coremem = memparse(p, &p);
4118 *core = coremem >> PAGE_SHIFT;
4119
	/* Paranoid check that UL is enough for the coremem value */
4121 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4122
4123 return 0;
4124}
4125
/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
4130static int __init cmdline_parse_kernelcore(char *p)
4131{
4132 return cmdline_parse_core(p, &required_kernelcore);
4133}
4134
/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
4139static int __init cmdline_parse_movablecore(char *p)
4140{
4141 return cmdline_parse_core(p, &required_movablecore);
4142}
4143
4144early_param("kernelcore", cmdline_parse_kernelcore);
4145early_param("movablecore", cmdline_parse_movablecore);
4146
4147#endif
4148
/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by present_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
 * function may optionally be used to account for unfreeable pages in the
 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
 * smaller per-cpu batchsize.
 */
4160void __init set_dma_reserve(unsigned long new_dma_reserve)
4161{
4162 dma_reserve = new_dma_reserve;
4163}
4164
4165#ifndef CONFIG_NEED_MULTIPLE_NODES
4166struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
4167EXPORT_SYMBOL(contig_page_data);
4168#endif
4169
4170void __init free_area_init(unsigned long *zones_size)
4171{
4172 free_area_init_node(0, zones_size,
4173 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4174}
4175
4176static int page_alloc_cpu_notify(struct notifier_block *self,
4177 unsigned long action, void *hcpu)
4178{
4179 int cpu = (unsigned long)hcpu;
4180
4181 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4182 drain_pages(cpu);
4183
		/*
		 * Spill the event counters of the dead processor
		 * into the current processor's event counters.
		 * This artificially elevates the count of the current
		 * processor.
		 */
4190 vm_events_fold_cpu(cpu);
4191
		/*
		 * Zero the differential counters of the dead processor
		 * so that the vm statistics are consistent.
		 *
		 * This is only okay since the processor is dead and cannot
		 * race with what we are doing.
		 */
4199 refresh_cpu_vm_stats(cpu);
4200 }
4201 return NOTIFY_OK;
4202}
4203
4204void __init page_alloc_init(void)
4205{
4206 hotcpu_notifier(page_alloc_cpu_notify, 0);
4207}
4208
/*
 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
 *	or min_free_kbytes changes.
 */
4213static void calculate_totalreserve_pages(void)
4214{
4215 struct pglist_data *pgdat;
4216 unsigned long reserve_pages = 0;
4217 enum zone_type i, j;
4218
4219 for_each_online_pgdat(pgdat) {
4220 for (i = 0; i < MAX_NR_ZONES; i++) {
4221 struct zone *zone = pgdat->node_zones + i;
4222 unsigned long max = 0;
4223
			/* Find valid and maximum lowmem_reserve in the zone */
4225 for (j = i; j < MAX_NR_ZONES; j++) {
4226 if (zone->lowmem_reserve[j] > max)
4227 max = zone->lowmem_reserve[j];
4228 }
4229
			/* we treat pages_high as reserved pages. */
4231 max += zone->pages_high;
4232
4233 if (max > zone->present_pages)
4234 max = zone->present_pages;
4235 reserve_pages += max;
4236 }
4237 }
4238 totalreserve_pages = reserve_pages;
4239}
4240
/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
4247static void setup_per_zone_lowmem_reserve(void)
4248{
4249 struct pglist_data *pgdat;
4250 enum zone_type j, idx;
4251
4252 for_each_online_pgdat(pgdat) {
4253 for (j = 0; j < MAX_NR_ZONES; j++) {
4254 struct zone *zone = pgdat->node_zones + j;
4255 unsigned long present_pages = zone->present_pages;
4256
4257 zone->lowmem_reserve[j] = 0;
4258
4259 idx = j;
4260 while (idx) {
4261 struct zone *lower_zone;
4262
4263 idx--;
4264
4265 if (sysctl_lowmem_reserve_ratio[idx] < 1)
4266 sysctl_lowmem_reserve_ratio[idx] = 1;
4267
4268 lower_zone = pgdat->node_zones + idx;
4269 lower_zone->lowmem_reserve[j] = present_pages /
4270 sysctl_lowmem_reserve_ratio[idx];
4271 present_pages += lower_zone->present_pages;
4272 }
4273 }
4274 }
4275
	/* update totalreserve_pages */
4277 calculate_totalreserve_pages();
4278}
4279
/**
 * setup_per_zone_pages_min - called when min_free_kbytes changes.
 *
 * Ensures that the pages_min, pages_low and pages_high values for each zone
 * are set correctly with respect to min_free_kbytes.
 */
4286void setup_per_zone_pages_min(void)
4287{
4288 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4289 unsigned long lowmem_pages = 0;
4290 struct zone *zone;
4291 unsigned long flags;
4292
	/* Calculate total number of !ZONE_HIGHMEM pages */
4294 for_each_zone(zone) {
4295 if (!is_highmem(zone))
4296 lowmem_pages += zone->present_pages;
4297 }
4298
4299 for_each_zone(zone) {
4300 u64 tmp;
4301
4302 spin_lock_irqsave(&zone->lock, flags);
4303 tmp = (u64)pages_min * zone->present_pages;
4304 do_div(tmp, lowmem_pages);
4305 if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The (pages_high-pages_low) and (pages_low-pages_min)
			 * deltas control asynch page reclaim, and so should
			 * not be capped for highmem.
			 */
4315 int min_pages;
4316
4317 min_pages = zone->present_pages / 1024;
4318 if (min_pages < SWAP_CLUSTER_MAX)
4319 min_pages = SWAP_CLUSTER_MAX;
4320 if (min_pages > 128)
4321 min_pages = 128;
4322 zone->pages_min = min_pages;
4323 } else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
4328 zone->pages_min = tmp;
4329 }
4330
4331 zone->pages_low = zone->pages_min + (tmp >> 2);
4332 zone->pages_high = zone->pages_min + (tmp >> 1);
4333 setup_zone_migrate_reserve(zone);
4334 spin_unlock_irqrestore(&zone->lock, flags);
4335 }
4336
	/* update totalreserve_pages */
4338 calculate_totalreserve_pages();
4339}
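
/*
 * Worked example (illustrative, not from the original source): with
 * min_free_kbytes == 4096 and 4KB pages, pages_min totals 1024 pages
 * spread across lowmem zones by size; a zone receiving tmp == 512 gets
 * pages_low = 512 + 128 and pages_high = 512 + 256.
 */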
4340
/**
 * setup_per_zone_inactive_ratio - called when min_free_kbytes changes.
 *
 * The inactive anon list should be small enough that the VM never has to
 * do too much work, but large enough that each inactive page has a chance
 * to be referenced again before it is swapped out.
 *
 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
 * INACTIVE_ANON pages on this zone's LRU, maintained by the
 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
 * the anonymous pages are kept on the inactive list.
 *
 * total     target    max
 * memory    ratio     inactive anon
 * -------------------------------------
 *   10MB       1         5MB
 *  100MB       1        50MB
 *    1GB       3       250MB
 *   10GB      10       0.9GB
 *  100GB      31       3GB
 *    1TB     101      10GB
 *   10TB     320      32GB
 */
4364static void setup_per_zone_inactive_ratio(void)
4365{
4366 struct zone *zone;
4367
4368 for_each_zone(zone) {
4369 unsigned int gb, ratio;
4370
		/* Zone size in gigabytes */
4372 gb = zone->present_pages >> (30 - PAGE_SHIFT);
4373 ratio = int_sqrt(10 * gb);
4374 if (!ratio)
4375 ratio = 1;
4376
4377 zone->inactive_ratio = ratio;
4378 }
4379}
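
/*
 * Illustrative only: a 4GB zone gives gb == 4 and int_sqrt(10 * 4) == 6,
 * so the target is about one inactive anon page for every six active ones.
 */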
4380
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
4405static int __init init_per_zone_pages_min(void)
4406{
4407 unsigned long lowmem_kbytes;
4408
4409 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4410
4411 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4412 if (min_free_kbytes < 128)
4413 min_free_kbytes = 128;
4414 if (min_free_kbytes > 65536)
4415 min_free_kbytes = 65536;
4416 setup_per_zone_pages_min();
4417 setup_per_zone_lowmem_reserve();
4418 setup_per_zone_inactive_ratio();
4419 return 0;
4420}
4421module_init(init_per_zone_pages_min)
4422
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
4428int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4429 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4430{
4431 proc_dointvec(table, write, file, buffer, length, ppos);
4432 if (write)
4433 setup_per_zone_pages_min();
4434 return 0;
4435}
4436
4437#ifdef CONFIG_NUMA
4438int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4439 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4440{
4441 struct zone *zone;
4442 int rc;
4443
4444 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4445 if (rc)
4446 return rc;
4447
4448 for_each_zone(zone)
4449 zone->min_unmapped_pages = (zone->present_pages *
4450 sysctl_min_unmapped_ratio) / 100;
4451 return 0;
4452}
4453
4454int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4455 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4456{
4457 struct zone *zone;
4458 int rc;
4459
4460 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4461 if (rc)
4462 return rc;
4463
4464 for_each_zone(zone)
4465 zone->min_slab_pages = (zone->present_pages *
4466 sysctl_min_slab_ratio) / 100;
4467 return 0;
4468}
4469#endif
4470
/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * pages_min watermarks. The lowmem reserve ratio can only make sense
 * if in function of the boot time zone sizes.
 */
4480int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4481 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4482{
4483 proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4484 setup_per_zone_lowmem_reserve();
4485 return 0;
4486}
4487
/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu.  It is the fraction of total pages in each zone that a hot per cpu
 * pagelist can have before it gets flushed back to buddy allocator.
 */
4494int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4495 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4496{
4497 struct zone *zone;
4498 unsigned int cpu;
4499 int ret;
4500
4501 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4502 if (!write || (ret == -EINVAL))
4503 return ret;
4504 for_each_populated_zone(zone) {
4505 for_each_online_cpu(cpu) {
4506 unsigned long high;
4507 high = zone->present_pages / percpu_pagelist_fraction;
4508 setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4509 }
4510 }
4511 return 0;
4512}
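
/*
 * Example (illustrative): "echo 8 > /proc/sys/vm/percpu_pagelist_fraction"
 * re-tunes every populated zone so each per-cpu list may hold at most
 * 1/8th of the zone's pages before draining back to the buddy lists.
 */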
4513
4514int hashdist = HASHDIST_DEFAULT;
4515
4516#ifdef CONFIG_NUMA
4517static int __init set_hashdist(char *str)
4518{
4519 if (!str)
4520 return 0;
4521 hashdist = simple_strtoul(str, &str, 0);
4522 return 1;
4523}
4524__setup("hashdist=", set_hashdist);
4525#endif
4526
/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
4533void *__init alloc_large_system_hash(const char *tablename,
4534 unsigned long bucketsize,
4535 unsigned long numentries,
4536 int scale,
4537 int flags,
4538 unsigned int *_hash_shift,
4539 unsigned int *_hash_mask,
4540 unsigned long limit)
4541{
4542 unsigned long long max = limit;
4543 unsigned long log2qty, size;
4544 void *table = NULL;
4545
	/* allow the kernel cmdline to have a say */
4547 if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
4549 numentries = nr_kernel_pages;
4550 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4551 numentries >>= 20 - PAGE_SHIFT;
4552 numentries <<= 20 - PAGE_SHIFT;
4553
		/* limit to 1 bucket per 2^scale bytes of low memory */
4555 if (scale > PAGE_SHIFT)
4556 numentries >>= (scale - PAGE_SHIFT);
4557 else
4558 numentries <<= (PAGE_SHIFT - scale);
4559
		/* Make sure we've got at least a 0-order allocation.. */
4561 if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4562 numentries = PAGE_SIZE / bucketsize;
4563 }
4564 numentries = roundup_pow_of_two(numentries);
4565
	/* limit allocation size to 1/16 total memory by default */
4567 if (max == 0) {
4568 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4569 do_div(max, bucketsize);
4570 }
4571
4572 if (numentries > max)
4573 numentries = max;
4574
4575 log2qty = ilog2(numentries);
4576
4577 do {
4578 size = bucketsize << log2qty;
4579 if (flags & HASH_EARLY)
4580 table = alloc_bootmem_nopanic(size);
4581 else if (hashdist)
4582 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4583 else {
4584 unsigned long order = get_order(size);
4585 table = (void*) __get_free_pages(GFP_ATOMIC, order);
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of the hash table, so split
			 * the high-order allocation and free the tail pages.
			 */
4590 if (table) {
4591 unsigned long alloc_end = (unsigned long)table +
4592 (PAGE_SIZE << order);
4593 unsigned long used = (unsigned long)table +
4594 PAGE_ALIGN(size);
4595 split_page(virt_to_page(table), order);
4596 while (used < alloc_end) {
4597 free_page(used);
4598 used += PAGE_SIZE;
4599 }
4600 }
4601 }
4602 } while (!table && size > PAGE_SIZE && --log2qty);
4603
4604 if (!table)
4605 panic("Failed to allocate %s hash table\n", tablename);
4606
4607 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4608 tablename,
4609 (1U << log2qty),
4610 ilog2(size) - PAGE_SHIFT,
4611 size);
4612
4613 if (_hash_shift)
4614 *_hash_shift = log2qty;
4615 if (_hash_mask)
4616 *_hash_mask = (1 << log2qty) - 1;
4617
4618 return table;
4619}
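
/*
 * Typical usage (a sketch based on how the VFS sizes its inode cache;
 * the surrounding variables are assumptions, not defined here):
 *
 *	inode_hashtable = alloc_large_system_hash("Inode-cache",
 *						  sizeof(struct hlist_head),
 *						  ihash_entries,
 *						  14,
 *						  0,
 *						  &i_hash_shift,
 *						  &i_hash_mask,
 *						  0);
 */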
4620
/* Return a pointer to the bitmap storing bits affecting a block of pages */
4622static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4623 unsigned long pfn)
4624{
4625#ifdef CONFIG_SPARSEMEM
4626 return __pfn_to_section(pfn)->pageblock_flags;
4627#else
4628 return zone->pageblock_flags;
4629#endif
4630}
4631
4632static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4633{
4634#ifdef CONFIG_SPARSEMEM
4635 pfn &= (PAGES_PER_SECTION-1);
4636 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4637#else
4638 pfn = pfn - zone->zone_start_pfn;
4639 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4640#endif
4641}
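
/*
 * Illustrative only (SPARSEMEM case): with pageblock_order == 10 and
 * NR_PAGEBLOCK_BITS == 4, a pfn 3072 pages into its section sits in
 * pageblock 3, so its flags start at bit index 12 of the section bitmap.
 */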
4642
/**
 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest to retrieve
 * @end_bitidx: The last bit of interest
 * returns pageblock_bits flags
 */
4650unsigned long get_pageblock_flags_group(struct page *page,
4651 int start_bitidx, int end_bitidx)
4652{
4653 struct zone *zone;
4654 unsigned long *bitmap;
4655 unsigned long pfn, bitidx;
4656 unsigned long flags = 0;
4657 unsigned long value = 1;
4658
4659 zone = page_zone(page);
4660 pfn = page_to_pfn(page);
4661 bitmap = get_pageblock_bitmap(zone, pfn);
4662 bitidx = pfn_to_bitidx(zone, pfn);
4663
4664 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4665 if (test_bit(bitidx + start_bitidx, bitmap))
4666 flags |= value;
4667
4668 return flags;
4669}
4670
/**
 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest
 * @end_bitidx: The last bit of interest
 * @flags: The flags to set
 */
4678void set_pageblock_flags_group(struct page *page, unsigned long flags,
4679 int start_bitidx, int end_bitidx)
4680{
4681 struct zone *zone;
4682 unsigned long *bitmap;
4683 unsigned long pfn, bitidx;
4684 unsigned long value = 1;
4685
4686 zone = page_zone(page);
4687 pfn = page_to_pfn(page);
4688 bitmap = get_pageblock_bitmap(zone, pfn);
4689 bitidx = pfn_to_bitidx(zone, pfn);
4690 VM_BUG_ON(pfn < zone->zone_start_pfn);
4691 VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
4692
4693 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4694 if (flags & value)
4695 __set_bit(bitidx + start_bitidx, bitmap);
4696 else
4697 __clear_bit(bitidx + start_bitidx, bitmap);
4698}
4699
/*
 * Mark the pageblock containing @page as MIGRATE_ISOLATE and move its
 * free pages onto the isolate free list so they will not be allocated.
 * Used by the page isolation code (see mm/page_isolation.c).
 */
4706int set_migratetype_isolate(struct page *page)
4707{
4708 struct zone *zone;
4709 unsigned long flags;
4710 int ret = -EBUSY;
4711
4712 zone = page_zone(page);
4713 spin_lock_irqsave(&zone->lock, flags);
4714
	/*
	 * In future, more migrate types will be able to be isolation target.
	 */
4717 if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
4718 goto out;
4719 set_pageblock_migratetype(page, MIGRATE_ISOLATE);
4720 move_freepages_block(zone, page, MIGRATE_ISOLATE);
4721 ret = 0;
4722out:
4723 spin_unlock_irqrestore(&zone->lock, flags);
4724 if (!ret)
4725 drain_all_pages();
4726 return ret;
4727}
4728
4729void unset_migratetype_isolate(struct page *