/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Supports four policies per VMA and per process:
 *
 * default     Allocate on the local node first, falling back to
 *             nearby nodes, as the low level allocator does.
 * preferred   Try a specific node first before falling back to the
 *             default behaviour.
 * bind        Allocate memory only from a specific set of nodes.
 * interleave  Allocate pages round-robin over a set of nodes.
 *
 * See Documentation/vm/numa_memory_policy.txt and the mbind(2),
 * set_mempolicy(2) and get_mempolicy(2) man pages for details.
 */
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal MPOL_MF_xxx flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * Run-time system-wide default policy => local allocation.
 */
struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1),	/* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

/*
 * Check that the nodemask contains at least one populated zone,
 * i.e. that something useful is allocatable on these nodes.
 */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
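
/*
 * Worked example (illustrative): with *rel = {4,5,6} (e.g. a cpuset's
 * mems_allowed) and a relative user nodemask *orig = {0,2}, nodes_fold()
 * folds {0,2} modulo the weight of *rel (3), leaving {0,2}, and
 * nodes_onto() maps those bit positions onto the members of *rel,
 * yielding *ret = {4,6}.
 */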

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * Allocate and initialize a new mempolicy for @mode, @flags and @nodes,
 * contextualizing the user nodemask against the current cpuset.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;
	nodemask_t cpuset_context_nmask;
	int ret;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;	/* simply delete any existing policy */
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
			nodes = NULL;	/* flag local alloc */
		}
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	if (nodes) {
		/*
		 * cpuset related setup doesn't apply to local allocation
		 */
		cpuset_update_task_memory_state();
		if (flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&cpuset_context_nmask, nodes,
					       &cpuset_current_mems_allowed);
		else
			nodes_and(cpuset_context_nmask, *nodes,
				  cpuset_current_mems_allowed);
		if (mpol_store_user_nodemask(policy))
			policy->w.user_nodemask = *nodes;
		else
			policy->w.cpuset_mems_allowed =
						cpuset_mems_allowed(current);
	}

	ret = mpol_ops[mode].create(policy,
				nodes ? &cpuset_context_nmask : NULL);
	if (ret < 0) {
		kmem_cache_free(policy_cache, policy);
		return ERR_PTR(ret);
	}
	return policy;
}
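
/*
 * Usage sketch (illustrative): building an interleave policy over nodes
 * 0 and 1 from inside the kernel would look like
 *
 *	nodemask_t nmask;
 *
 *	nodes_clear(nmask);
 *	node_set(0, nmask);
 *	node_set(1, nmask);
 *	pol = mpol_new(MPOL_INTERLEAVE, 0, &nmask);
 *
 * where pol->v.nodes ends up restricted to the intersection of nmask
 * with the caller's cpuset, as implemented above.
 */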

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol,
				 const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	pol->v.nodes = tmp;
	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/* Migrate a policy to a different set of nodes */
static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;
	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling the zero page and other pages that may have been
		 * marked special by the system.  Without it, the location of
		 * the zero page could influence MPOL_MF_STRICT, zero pages
		 * would be counted in the per-node stats, and there would be
		 * useless pages to move into the target node.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}
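
/*
 * Note (illustrative): do_mbind() below calls check_range() with
 * MPOL_MF_INVERT set, so pages that are NOT on the requested nodes fail
 * the nodemask test in check_pte_range() and are exactly the ones
 * gathered on the pagelist for migration.
 */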

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_put(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */
void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new;
	struct mm_struct *mm = current->mm;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no tasks, which isn't an issue, because mm can be NULL
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	mpol_put(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	if (mm)
		up_write(&mm->mmap_sem);

	return 0;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		*nmask  = cpuset_current_mems_allowed;
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_policy_nodemask(pol, nmask);

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}
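
/*
 * Worked example (illustrative): for from_nodes = {0,1} and
 * to_nodes = {2,3}, node_remap() gives 0 -> 2 and 1 -> 3.  Neither
 * destination lies in the remaining source set 'tmp', so the loop
 * migrates 0 -> 2 and then 1 -> 3, preserving the relative layout of
 * the data across the two nodes.
 */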

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif /* CONFIG_MIGRATION */

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : -1);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		/* migrate_prep() drains the LRU pagevecs so pages can be isolated */
		err = migrate_prep();
		if (err)
			return err;
	}
	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_put(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
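
/*
 * Layout example (illustrative): on a 64-bit kernel, a caller passing
 * maxnode = 3 gets maxnode = 2 after the decrement above, so nlongs = 1
 * and endmask = (1UL << 2) - 1 = 0x3: only the bits for nodes 0 and 1
 * are accepted (maxnode counts one past the highest usable bit).
 */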

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			unsigned long mode,
			unsigned long __user *nmask, unsigned long maxnode,
			unsigned flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}
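
/*
 * Userspace sketch (illustrative, via the syscall wrappers in numaif.h):
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	mbind(addr, len, MPOL_BIND, &mask, 8 * sizeof(mask),
 *	      MPOL_MF_STRICT | MPOL_MF_MOVE);
 *
 * binds [addr, addr + len) to nodes 0-1 and migrates misplaced pages
 * already mapped by the caller.
 */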

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
		unsigned long maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}
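
/*
 * Userspace sketch (illustrative): interleave all future allocations of
 * the calling task over nodes 0-1, interpreted relative to its cpuset:
 *
 *	unsigned long mask = 0x3;
 *
 *	set_mempolicy(MPOL_INTERLEAVE | MPOL_F_RELATIVE_NODES,
 *		      &mask, 8 * sizeof(mask));
 */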

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
		const unsigned long __user *old_nodes,
		const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * uid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}

/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr, unsigned long flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma  - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Current or other task's task mempolicy and non-shared vma policies
 * are protected by the task's mmap_sem, which must be held for read by
 * the caller.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra
 * reference count, to prevent freeing of the shared policy while the
 * task or vma policy references it.
 */
static struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
									addr);
			if (vpol)
				pol = vpol;
		} else if (vma->vm_policy)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	return pol;
}

/* Return a nodemask representing a mempolicy */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(policy->mode == MPOL_BIND) &&
			gfp_zone(gfp) >= policy_zone &&
			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;

	return NULL;
}

/* Return a zonelist indicated by gfp for node representing a mempolicy */
static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
{
	int nd = numa_node_id();

	switch (policy->mode) {
	case MPOL_PREFERRED:
		if (!(policy->flags & MPOL_F_LOCAL))
			nd = policy->v.preferred_node;
		break;
	case MPOL_BIND:
		/*
		 * Normally, MPOL_BIND allocations are node-local within the
		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
		 * current node isn't part of the mask, we use the zonelist for
		 * the first node in the mask instead.
		 */
		if (unlikely(gfp & __GFP_THISNODE) &&
				unlikely(!node_isset(nd, policy->v.nodes)))
			nd = first_node(policy->v.nodes);
		break;
	case MPOL_INTERLEAVE:	/* should not happen */
		break;
	default:
		BUG();
	}
	return node_zonelist(nd, gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	if (next < MAX_NUMNODES)
		me->il_next = next;
	return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 * @policy must be protected from freeing by the caller.  If @policy is
 * the current task's mempolicy, this protection is implicit, as only the
 * task can change its policy.  The system default policy requires no
 * such protection.
 */
unsigned slab_node(struct mempolicy *policy)
{
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return numa_node_id();

	switch (policy->mode) {
	case MPOL_PREFERRED:
		/*
		 * handled MPOL_F_LOCAL above
		 */
		return policy->v.preferred_node;

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND: {
		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		struct zone *zone;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
		(void)first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->v.nodes,
							&zone);
		return zone->node;
	}

	default:
		BUG();
	}
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target;
	int c;
	int nid = -1;

	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)off % nnodes;
	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}
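
/*
 * Worked example (illustrative): with pol->v.nodes = {1,3,5} (nnodes = 3)
 * and off = 7, target = 7 % 3 = 1, so the loop stops at the second set
 * node and returns nid = 3.
 */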

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}

#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
 * @nodemask for filtering the zonelist.
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol,
				nodemask_t **nodemask)
{
	struct zonelist *zl;

	*mpol = get_vma_policy(current, vma, addr);
	*nodemask = NULL;	/* assume !MPOL_BIND */

	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
				huge_page_shift(hstate_vma(vma))), gfp_flags);
	} else {
		zl = policy_zonelist(gfp_flags, *mpol);
		if ((*mpol)->mode == MPOL_BIND)
			*nodemask = &(*mpol)->v.nodes;
	}
	return zl;
}
#endif /* CONFIG_HUGETLBFS */

/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = node_zonelist(nid, gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
	return page;
}

/**
 * alloc_page_vma - Allocate a page for a VMA.
 * @gfp:
 *	%GFP_USER     user allocation.
 *	%GFP_KERNEL   kernel allocations,
 *	%GFP_HIGHMEM  highmem/user allocations,
 *	%GFP_FS       allocation should not call back into a file system.
 *	%GFP_ATOMIC   don't sleep.
 * @vma:  Pointer to VMA or NULL if not available.
 * @addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When VMA is not NULL the caller must hold down_read on the mmap_sem of
 * the mm_struct of the VMA to prevent it from going away.  Should be used
 * for all allocations for pages that will be mapped into user space.
 * Returns NULL when no page can be allocated.
 */
struct page *
alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	struct zonelist *zl;

	cpuset_update_task_memory_state();

	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
		mpol_cond_put(pol);
		return alloc_page_interleave(gfp, 0, nid);
	}
	zl = policy_zonelist(gfp, pol);
	if (unlikely(mpol_needs_cond_ref(pol))) {
		/*
		 * slow path: ref counted shared policy
		 */
		struct page *page = __alloc_pages_nodemask(gfp, 0,
						zl, policy_nodemask(gfp, pol));
		__mpol_put(pol);
		return page;
	}
	/*
	 * fast path:  default or task policy
	 */
	return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
}

/**
 * alloc_pages_current - Allocate pages.
 * @gfp:
 *	%GFP_USER    user allocation,
 *	%GFP_KERNEL  kernel allocation,
 *	%GFP_HIGHMEM highmem allocation,
 *	%GFP_FS      don't call back into a file system.
 *	%GFP_ATOMIC  don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate a page from the kernel page pool.  When not in
 * interrupt context, apply the current process NUMA policy.
 * Returns NULL when no page can be allocated.
 *
 * Don't call cpuset_update_task_memory_state() unless
 * 1) it's ok to take cpuset_sem (can WAIT), and
 * 2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = current->mempolicy;

	if ((gfp & __GFP_WAIT) && !in_interrupt())
		cpuset_update_task_memory_state();
	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
		pol = &default_policy;

	/*
	 * No reference counting needed for current->mempolicy
	 * nor system default_policy
	 */
	if (pol->mode == MPOL_INTERLEAVE)
		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
	return __alloc_pages_nodemask(gfp, order,
			policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
}
EXPORT_SYMBOL(alloc_pages_current);

/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 */

/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);
	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(old, &mems);
	}
	*new = *old;
	atomic_set(&new->refcnt, 1);
	return new;
}

/*
 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
 * eliminate the MPOL_F_* flags that require conditional ref and
 * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
 * after return.  Use the returned value.
 *
 * Allows use of a mempolicy for, e.g., multiple allocations with a single
 * policy lookup, even if the policy needs/has extra ref on lookup.
 * shmem_readahead needs this.
 */
struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
						struct mempolicy *frompol)
{
	if (!mpol_needs_cond_ref(frompol))
		return frompol;

	*tompol = *frompol;
	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
	__mpol_put(frompol);
	return tompol;
}

static int mpol_match_intent(const struct mempolicy *a,
			     const struct mempolicy *b)
{
	if (a->flags != b->flags)
		return 0;
	if (!mpol_store_user_nodemask(a))
		return 1;
	return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
}

/* Slow path of a mempolicy comparison */
int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return 0;
	if (a->mode != b->mode)
		return 0;
	if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
		return 0;
	switch (a->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		return nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node &&
			a->flags == b->flags;
	default:
		BUG();
		return 0;
	}
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */

/* lookup first element intersecting start-end */
/* Caller holds sp->lock */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}
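
/*
 * Example (illustrative): with nodes covering [0,4) and [4,8) in the
 * tree, sp_lookup(sp, 2, 6) first descends to some node intersecting
 * [2,6), then walks back via rb_prev() until the previous node no
 * longer intersects, returning the first intersecting node, here [0,4).
 */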

/* Insert a new shared policy into the tree.  Caller holds sp->lock. */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->mode : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	spin_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	spin_unlock(&sp->lock);
	return pol;
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	mpol_put(n->policy);
	kmem_cache_free(sn_cache, n);
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
{
	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);

	if (!n)
		return NULL;
	n->start = start;
	n->end = end;
	mpol_get(pol);
	pol->flags |= MPOL_F_SHARED;	/* for unref */
	n->policy = pol;
	return n;
}

/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n, *new2 = NULL;

restart:
	spin_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!new2) {
					spin_unlock(&sp->lock);
					new2 = sp_alloc(end, n->end, n->policy);
					if (!new2)
						return -ENOMEM;
					goto restart;
				}
				n->end = start;
				sp_insert(sp, new2);
				new2 = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	spin_unlock(&sp->lock);
	if (new2) {
		mpol_put(new2->policy);
		kmem_cache_free(sn_cache, new2);
	}
	return 0;
}

/**
 * mpol_shared_policy_init - initialize shared policy for inode
 * @sp: pointer to inode shared policy
 * @mpol: struct mempolicy to install
 *
 * Install non-NULL @mpol in inode's shared policy rb-tree.
 * On entry, the current task has a reference on a non-NULL @mpol.
 * This must be released on exit.
 */
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
	spin_lock_init(&sp->lock);

	if (mpol) {
		struct vm_area_struct pvma;
		struct mempolicy *new;

		/* contextualize the tmpfs mount point mempolicy */
		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
		mpol_put(mpol);		/* drop our ref on sb mpol */
		if (IS_ERR(new))
			return;		/* no valid nodemask intersection */

		/* Create pseudo-vma that contains just the policy */
		memset(&pvma, 0, sizeof(struct vm_area_struct));
		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
		mpol_put(new);			/* drop initial ref */
	}
}

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->mode : -1,
		 npol ? npol->flags : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : -1);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		kmem_cache_free(sn_cache, new);
	return err;
}

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	spin_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		rb_erase(&n->nd, &p->root);
		mpol_put(n->policy);
		kmem_cache_free(sn_cache, n);
	}
	spin_unlock(&p->lock);
}

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	/*
	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), or
	 * fall back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		printk("numa_policy_init: interleaving failed\n");
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}

/*
 * Parse and format mempolicy from/to strings
 */

/*
 * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
 * Used only for mpol_parse_str() and mpol_to_str()
 */
#define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
static const char * const policy_types[] =
	{ "default", "prefer", "bind", "interleave", "local" };

#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy
 * @str:  string containing mempolicy to parse
 * @mpol:  pointer to struct mempolicy pointer, returned on success.
 * @no_context:  flag whether to "contextualize" the mempolicy
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
 * if @no_context is true, save the input nodemask in w.user_nodemask in
 * the returned mempolicy.  This will be used to "clone" the mempolicy in
 * a specific context [cpusets] at a later time.  Used to parse tmpfs mpol
 * mount option.  Note that if 'static' or 'relative' flags are specified
 * in the mount option, MPOL_F_STATIC_NODES or MPOL_F_RELATIVE_NODES will
 * be set in w.user_nodemask.
 *
 * On success, returns 0, else 1
 */
int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
{
	struct mempolicy *new = NULL;
	unsigned short uninitialized_var(mode);
	unsigned short uninitialized_var(mode_flags);
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int i;
	int err = 1;

	if (nodelist) {
		/* NUL-terminate mode or flags string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	if (flags)
		*flags++ = '\0';	/* terminate mode string */

	for (i = 0; i <= MPOL_LOCAL; i++) {
		if (!strcmp(str, policy_types[i])) {
			mode = i;
			break;
		}
	}
	if (i > MPOL_LOCAL)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (!*rest)
				err = 0;
		}
		break;
	case MPOL_INTERLEAVE:
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			nodes = node_states[N_HIGH_MEMORY];
		err = 0;
		break;
	case MPOL_LOCAL:
		/*
		 * Don't allow a nodelist;  mpol_new() checks flags
		 */
		if (nodelist)
			goto out;
		mode = MPOL_PREFERRED;
		break;

	/*
	 * case MPOL_BIND:    mpol_new() enforces non-empty nodemask.
	 * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
	 */
	}

	mode_flags = 0;
	if (flags) {
		/*
		 * Currently, we only support two mutually exclusive
		 * mode flags.
		 */
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			err = 1;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		err = 1;
	else if (no_context)
		new->w.user_nodemask = nodes;	/* save for contextualization */

out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	if (flags)
		*--flags = '=';
	if (!err)
		*mpol = new;
	return err;
}
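
/*
 * Example (illustrative): the tmpfs mount option
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt/tmp
 *
 * hands the string "interleave:0-3" to mpol_parse_str();
 * "bind=static:0,2" would select MPOL_BIND with MPOL_F_STATIC_NODES
 * over nodes 0 and 2.
 */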
#endif /* CONFIG_TMPFS */

/*
 * Convert a mempolicy into a string.
 * Returns the number of characters in buffer (if positive)
 * or an error (negative)
 */
int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
{
	char *p = buffer;
	int l;
	nodemask_t nodes;
	unsigned short mode;
	unsigned short flags = pol ? pol->flags : 0;

	/*
	 * Sanity check:  room for longest mode, flag and some nodes
	 */
	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);

	if (!pol || pol == &default_policy)
		mode = MPOL_DEFAULT;
	else
		mode = pol->mode;

	switch (mode) {
	case MPOL_DEFAULT:
		nodes_clear(nodes);
		break;

	case MPOL_PREFERRED:
		nodes_clear(nodes);
		if (flags & MPOL_F_LOCAL)
			mode = MPOL_LOCAL;	/* pseudo-policy */
		else
			node_set(pol->v.preferred_node, nodes);
		break;

	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		if (no_context)
			nodes = pol->w.user_nodemask;
		else
			nodes = pol->v.nodes;
		break;

	default:
		BUG();
	}

	l = strlen(policy_types[mode]);
	if (buffer + maxlen < p + l + 1)
		return -ENOSPC;

	strcpy(p, policy_types[mode]);
	p += l;

	if (flags & MPOL_MODE_FLAGS) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = '=';

		/*
		 * Currently, the only defined flags are mutually exclusive
		 */
		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");
	}

	if (!nodes_empty(nodes)) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = ':';
		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
	}
	return p - buffer;
}
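
/*
 * Example outputs (illustrative): "default", "prefer:1", "local",
 * "interleave:0-3" and "bind=static:0,2", i.e. the same
 * <mode>[=<flags>][:<nodelist>] format that mpol_parse_str() accepts.
 */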

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

static void gather_stats(struct page *page, void *private, int pte_dirty)
{
	struct numa_maps *md = private;
	int count = page_mapcount(page);

	md->pages++;
	if (pte_dirty || PageDirty(page))
		md->dirty++;

	if (PageSwapCache(page))
		md->swapcache++;

	if (PageActive(page) || PageUnevictable(page))
		md->active++;

	if (PageWriteback(page))
		md->writeback++;

	if (PageAnon(page))
		md->anon++;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)]++;
}

#ifdef CONFIG_HUGETLB_PAGE
static void check_huge_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct numa_maps *md)
{
	unsigned long addr;
	struct page *page;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	for (addr = start; addr < end; addr += sz) {
		pte_t *ptep = huge_pte_offset(vma->vm_mm,
						addr & huge_page_mask(h));
		pte_t pte;

		if (!ptep)
			continue;

		pte = *ptep;
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (!page)
			continue;

		gather_stats(page, md, pte_dirty(*ptep));
	}
}
#else
static inline void check_huge_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct numa_maps *md)
{
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
int show_numa_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct numa_maps *md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
	if (!md)
		return 0;

	pol = get_vma_policy(priv->task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol, 0);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else if (vma->vm_start <= mm->start_stack &&
			vma->vm_end >= mm->start_stack) {
		seq_printf(m, " stack");
	}

	if (is_vm_hugetlb_page(vma)) {
		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
		seq_printf(m, " huge");
	} else {
		check_pgd_range(vma, vma->vm_start, vma->vm_end,
			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
	}

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_HIGH_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');
	kfree(md);

	if (m->count < m->size)
		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}