/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.  Four policies are supported, per process and per VMA:
 *
 * interleave	Allocate memory interleaved (round-robin) over a set of nodes.
 * bind		Only allocate memory on a specific set of nodes; do not fall
 *		back to other nodes.
 * preferred	Try a specific node first before using the normal fallback
 *		order.
 * default	Allocate on the local node first (the system default).
 *
 * The VMA policy, if any, has priority over the process policy for an
 * allocation done on behalf of a page fault in that VMA.
 */

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. An allocation for a zone below that is not policied. */
enum zone_type policy_zone = 0;

/*
 * Run-time system-wide default policy: local allocation.
 */
struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1),	/* never freed */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
125static int is_valid_nodemask(const nodemask_t *nodemask)
126{
127 int nd, k;
128
129
130 k = policy_zone;
131
132 for_each_node_mask(nd, *nodemask) {
133 struct zone *z;
134
135 for (k = 0; k <= policy_zone; k++) {
136 z = &NODE_DATA(nd)->node_zones[k];
137 if (z->present_pages > 0)
138 return 1;
139 }
140 }
141
142 return 0;
143}
144
145static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
146{
147 return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
148}
149
150static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
151 const nodemask_t *rel)
152{
153 nodemask_t tmp;
154 nodes_fold(tmp, *orig, nodes_weight(*rel));
155 nodes_onto(*ret, tmp, *rel);
156}
157
158static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
159{
160 if (nodes_empty(*nodes))
161 return -EINVAL;
162 pol->v.nodes = *nodes;
163 return 0;
164}
165
166static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
167{
168 if (!nodes)
169 pol->flags |= MPOL_F_LOCAL;
170 else if (nodes_empty(*nodes))
171 return -EINVAL;
172 else
173 pol->v.preferred_node = first_node(*nodes);
174 return 0;
175}
176
177static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
178{
179 if (!is_valid_nodemask(nodes))
180 return -EINVAL;
181 pol->v.nodes = *nodes;
182 return 0;
183}

/*
 * Create a new mempolicy of @mode with @flags.  @nodes is folded into the
 * current cpuset context.  Returns NULL for MPOL_DEFAULT, or ERR_PTR() on
 * error.
 */
186static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
187 nodemask_t *nodes)
188{
189 struct mempolicy *policy;
190 nodemask_t cpuset_context_nmask;
191 int ret;
192
193 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
194 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
195
196 if (mode == MPOL_DEFAULT) {
197 if (nodes && !nodes_empty(*nodes))
198 return ERR_PTR(-EINVAL);
199 return NULL;
200 }
201 VM_BUG_ON(!nodes);
202
203
204
205
206
207
208 if (mode == MPOL_PREFERRED) {
209 if (nodes_empty(*nodes)) {
210 if (((flags & MPOL_F_STATIC_NODES) ||
211 (flags & MPOL_F_RELATIVE_NODES)))
212 return ERR_PTR(-EINVAL);
213 nodes = NULL;
214 }
215 } else if (nodes_empty(*nodes))
216 return ERR_PTR(-EINVAL);
217 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
218 if (!policy)
219 return ERR_PTR(-ENOMEM);
220 atomic_set(&policy->refcnt, 1);
221 policy->mode = mode;
222 policy->flags = flags;
223
224 if (nodes) {
225
226
227
228 cpuset_update_task_memory_state();
229 if (flags & MPOL_F_RELATIVE_NODES)
230 mpol_relative_nodemask(&cpuset_context_nmask, nodes,
231 &cpuset_current_mems_allowed);
232 else
233 nodes_and(cpuset_context_nmask, *nodes,
234 cpuset_current_mems_allowed);
235 if (mpol_store_user_nodemask(policy))
236 policy->w.user_nodemask = *nodes;
237 else
238 policy->w.cpuset_mems_allowed =
239 cpuset_mems_allowed(current);
240 }
241
242 ret = mpol_ops[mode].create(policy,
243 nodes ? &cpuset_context_nmask : NULL);
244 if (ret < 0) {
245 kmem_cache_free(policy_cache, policy);
246 return ERR_PTR(ret);
247 }
248 return policy;
249}
250
251
252void __mpol_put(struct mempolicy *p)
253{
254 if (!atomic_dec_and_test(&p->refcnt))
255 return;
256 kmem_cache_free(policy_cache, p);
257}
258
259static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
260{
261}
262
263static void mpol_rebind_nodemask(struct mempolicy *pol,
264 const nodemask_t *nodes)
265{
266 nodemask_t tmp;
267
268 if (pol->flags & MPOL_F_STATIC_NODES)
269 nodes_and(tmp, pol->w.user_nodemask, *nodes);
270 else if (pol->flags & MPOL_F_RELATIVE_NODES)
271 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
272 else {
273 nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
274 *nodes);
275 pol->w.cpuset_mems_allowed = *nodes;
276 }
277
278 pol->v.nodes = tmp;
279 if (!node_isset(current->il_next, tmp)) {
280 current->il_next = next_node(current->il_next, tmp);
281 if (current->il_next >= MAX_NUMNODES)
282 current->il_next = first_node(tmp);
283 if (current->il_next >= MAX_NUMNODES)
284 current->il_next = numa_node_id();
285 }
286}
287
288static void mpol_rebind_preferred(struct mempolicy *pol,
289 const nodemask_t *nodes)
290{
291 nodemask_t tmp;
292
293 if (pol->flags & MPOL_F_STATIC_NODES) {
294 int node = first_node(pol->w.user_nodemask);
295
296 if (node_isset(node, *nodes)) {
297 pol->v.preferred_node = node;
298 pol->flags &= ~MPOL_F_LOCAL;
299 } else
300 pol->flags |= MPOL_F_LOCAL;
301 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
302 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
303 pol->v.preferred_node = first_node(tmp);
304 } else if (!(pol->flags & MPOL_F_LOCAL)) {
305 pol->v.preferred_node = node_remap(pol->v.preferred_node,
306 pol->w.cpuset_mems_allowed,
307 *nodes);
308 pol->w.cpuset_mems_allowed = *nodes;
309 }
310}

/* Rebind @pol to a new set of allowed nodes, e.g. after a cpuset change. */
313static void mpol_rebind_policy(struct mempolicy *pol,
314 const nodemask_t *newmask)
315{
316 if (!pol)
317 return;
318 if (!mpol_store_user_nodemask(pol) &&
319 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
320 return;
321 mpol_ops[pol->mode].rebind(pol, newmask);
322}

/*
 * Wrapper for mpol_rebind_policy() that just requires a task
 * pointer, and rebinds the task's mempolicy.
 */
329void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
330{
331 mpol_rebind_policy(tsk->mempolicy, new);
332}

/*
 * Rebind each vma in mm to the new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem for writing during
 * the call.
 */
340void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
341{
342 struct vm_area_struct *vma;
343
344 down_write(&mm->mmap_sem);
345 for (vma = mm->mmap; vma; vma = vma->vm_next)
346 mpol_rebind_policy(vma->vm_policy, new);
347 up_write(&mm->mmap_sem);
348}
349
350static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
351 [MPOL_DEFAULT] = {
352 .rebind = mpol_rebind_default,
353 },
354 [MPOL_INTERLEAVE] = {
355 .create = mpol_new_interleave,
356 .rebind = mpol_rebind_nodemask,
357 },
358 [MPOL_PREFERRED] = {
359 .create = mpol_new_preferred,
360 .rebind = mpol_rebind_preferred,
361 },
362 [MPOL_BIND] = {
363 .create = mpol_new_bind,
364 .rebind = mpol_rebind_nodemask,
365 },
366};
367
368static void gather_stats(struct page *, void *, int pte_dirty);
369static void migrate_page_add(struct page *page, struct list_head *pagelist,
370 unsigned long flags);

/*
 * Walk the pte range, checking each mapped page against @nodes and either
 * gathering stats, queueing the page for migration, or reporting a policy
 * violation, depending on @flags.
 */
373static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
374 unsigned long addr, unsigned long end,
375 const nodemask_t *nodes, unsigned long flags,
376 void *private)
377{
378 pte_t *orig_pte;
379 pte_t *pte;
380 spinlock_t *ptl;
381
382 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
383 do {
384 struct page *page;
385 int nid;
386
387 if (!pte_present(*pte))
388 continue;
389 page = vm_normal_page(vma, addr, *pte);
390 if (!page)
391 continue;
		/*
		 * Skip pages reserved by the kernel (e.g. the zero page):
		 * they are not subject to memory policy and must not be
		 * counted or queued for migration.
		 */
403 if (PageReserved(page))
404 continue;
405 nid = page_to_nid(page);
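		/*
		 * MPOL_MF_INVERT flips the node test below: with it set,
		 * callers act on pages that are NOT on the requested nodes
		 * (mbind uses this to find misplaced pages).
		 */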
406 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
407 continue;
408
409 if (flags & MPOL_MF_STATS)
410 gather_stats(page, private, pte_dirty(*pte));
411 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
412 migrate_page_add(page, private, flags);
413 else
414 break;
415 } while (pte++, addr += PAGE_SIZE, addr != end);
416 pte_unmap_unlock(orig_pte, ptl);
417 return addr != end;
418}
419
420static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
421 unsigned long addr, unsigned long end,
422 const nodemask_t *nodes, unsigned long flags,
423 void *private)
424{
425 pmd_t *pmd;
426 unsigned long next;
427
428 pmd = pmd_offset(pud, addr);
429 do {
430 next = pmd_addr_end(addr, end);
431 if (pmd_none_or_clear_bad(pmd))
432 continue;
433 if (check_pte_range(vma, pmd, addr, next, nodes,
434 flags, private))
435 return -EIO;
436 } while (pmd++, addr = next, addr != end);
437 return 0;
438}
439
440static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
441 unsigned long addr, unsigned long end,
442 const nodemask_t *nodes, unsigned long flags,
443 void *private)
444{
445 pud_t *pud;
446 unsigned long next;
447
448 pud = pud_offset(pgd, addr);
449 do {
450 next = pud_addr_end(addr, end);
451 if (pud_none_or_clear_bad(pud))
452 continue;
453 if (check_pmd_range(vma, pud, addr, next, nodes,
454 flags, private))
455 return -EIO;
456 } while (pud++, addr = next, addr != end);
457 return 0;
458}
459
460static inline int check_pgd_range(struct vm_area_struct *vma,
461 unsigned long addr, unsigned long end,
462 const nodemask_t *nodes, unsigned long flags,
463 void *private)
464{
465 pgd_t *pgd;
466 unsigned long next;
467
468 pgd = pgd_offset(vma->vm_mm, addr);
469 do {
470 next = pgd_addr_end(addr, end);
471 if (pgd_none_or_clear_bad(pgd))
472 continue;
473 if (check_pud_range(vma, pgd, addr, next, nodes,
474 flags, private))
475 return -EIO;
476 } while (pgd++, addr = next, addr != end);
477 return 0;
478}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
485static struct vm_area_struct *
486check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
487 const nodemask_t *nodes, unsigned long flags, void *private)
488{
489 int err;
490 struct vm_area_struct *first, *vma, *prev;
491
492
493 first = find_vma(mm, start);
494 if (!first)
495 return ERR_PTR(-EFAULT);
496 prev = NULL;
497 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
498 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
499 if (!vma->vm_next && vma->vm_end < end)
500 return ERR_PTR(-EFAULT);
501 if (prev && prev->vm_end < vma->vm_start)
502 return ERR_PTR(-EFAULT);
503 }
504 if (!is_vm_hugetlb_page(vma) &&
505 ((flags & MPOL_MF_STRICT) ||
506 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
507 vma_migratable(vma)))) {
508 unsigned long endvma = vma->vm_end;
509
510 if (endvma > end)
511 endvma = end;
512 if (vma->vm_start > start)
513 start = vma->vm_start;
514 err = check_pgd_range(vma, start, endvma, nodes,
515 flags, private);
516 if (err) {
517 first = ERR_PTR(err);
518 break;
519 }
520 }
521 prev = vma;
522 }
523 return first;
524}

/* Apply policy to a single VMA */
527static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
528{
529 int err = 0;
530 struct mempolicy *old = vma->vm_policy;
531
532 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
533 vma->vm_start, vma->vm_end, vma->vm_pgoff,
534 vma->vm_ops, vma->vm_file,
535 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
536
537 if (vma->vm_ops && vma->vm_ops->set_policy)
538 err = vma->vm_ops->set_policy(vma, new);
539 if (!err) {
540 mpol_get(new);
541 vma->vm_policy = new;
542 mpol_put(old);
543 }
544 return err;
545}

/* Apply a new policy to each VMA in the given range, see the split note below. */
548static int mbind_range(struct vm_area_struct *vma, unsigned long start,
549 unsigned long end, struct mempolicy *new)
550{
551 struct vm_area_struct *next;
552 int err;
553
554 err = 0;
555 for (; vma && vma->vm_start < end; vma = next) {
556 next = vma->vm_next;
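		/* Split VMAs that only partially overlap the range before applying the policy. */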
557 if (vma->vm_start < start)
558 err = split_vma(vma->vm_mm, vma, start, 1);
559 if (!err && vma->vm_end > end)
560 err = split_vma(vma->vm_mm, vma, end, 0);
561 if (!err)
562 err = policy_vma(vma, new);
563 if (err)
564 break;
565 }
566 return err;
567}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default mempolicy.
 * Allows more rapid checking of this (combined perhaps with other
 * PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should only be a newly
 * forked child not yet visible on the task list, because manipulating the
 * task flags of a visible task is not safe; hence the odd name of this
 * function.  It is also safe to call it on current, which is what the
 * static wrapper mpol_set_task_struct_flag() below does.
 */
586void mpol_fix_fork_child_flag(struct task_struct *p)
587{
588 if (p->mempolicy)
589 p->flags |= PF_MEMPOLICY;
590 else
591 p->flags &= ~PF_MEMPOLICY;
592}
593
594static void mpol_set_task_struct_flag(void)
595{
596 mpol_fix_fork_child_flag(current);
597}

/* Set the process memory policy */
600static long do_set_mempolicy(unsigned short mode, unsigned short flags,
601 nodemask_t *nodes)
602{
603 struct mempolicy *new;
604 struct mm_struct *mm = current->mm;
605
606 new = mpol_new(mode, flags, nodes);
607 if (IS_ERR(new))
608 return PTR_ERR(new);
609
610
611
612
613
614
615
616 if (mm)
617 down_write(&mm->mmap_sem);
618 mpol_put(current->mempolicy);
619 current->mempolicy = new;
620 mpol_set_task_struct_flag();
621 if (new && new->mode == MPOL_INTERLEAVE &&
622 nodes_weight(new->v.nodes))
623 current->il_next = first_node(new->v.nodes);
624 if (mm)
625 up_write(&mm->mmap_sem);
626
627 return 0;
628}

/*
 * Return nodemask for policy for get_mempolicy() query
 */
633static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
634{
635 nodes_clear(*nodes);
636 if (p == &default_policy)
637 return;
638
639 switch (p->mode) {
640 case MPOL_BIND:
641
642 case MPOL_INTERLEAVE:
643 *nodes = p->v.nodes;
644 break;
645 case MPOL_PREFERRED:
646 if (!(p->flags & MPOL_F_LOCAL))
647 node_set(p->v.preferred_node, *nodes);
648
649 break;
650 default:
651 BUG();
652 }
653}
654
655static int lookup_node(struct mm_struct *mm, unsigned long addr)
656{
657 struct page *p;
658 int err;
659
660 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
661 if (err >= 0) {
662 err = page_to_nid(p);
663 put_page(p);
664 }
665 return err;
666}

/* Retrieve NUMA policy */
669static long do_get_mempolicy(int *policy, nodemask_t *nmask,
670 unsigned long addr, unsigned long flags)
671{
672 int err;
673 struct mm_struct *mm = current->mm;
674 struct vm_area_struct *vma = NULL;
675 struct mempolicy *pol = current->mempolicy;
676
677 cpuset_update_task_memory_state();
678 if (flags &
679 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
680 return -EINVAL;
681
682 if (flags & MPOL_F_MEMS_ALLOWED) {
683 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
684 return -EINVAL;
685 *policy = 0;
686 *nmask = cpuset_current_mems_allowed;
687 return 0;
688 }
689
690 if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
696 down_read(&mm->mmap_sem);
697 vma = find_vma_intersection(mm, addr, addr+1);
698 if (!vma) {
699 up_read(&mm->mmap_sem);
700 return -EFAULT;
701 }
702 if (vma->vm_ops && vma->vm_ops->get_policy)
703 pol = vma->vm_ops->get_policy(vma, addr);
704 else
705 pol = vma->vm_policy;
706 } else if (addr)
707 return -EINVAL;
708
709 if (!pol)
710 pol = &default_policy;
711
712 if (flags & MPOL_F_NODE) {
713 if (flags & MPOL_F_ADDR) {
714 err = lookup_node(mm, addr);
715 if (err < 0)
716 goto out;
717 *policy = err;
718 } else if (pol == current->mempolicy &&
719 pol->mode == MPOL_INTERLEAVE) {
720 *policy = current->il_next;
721 } else {
722 err = -EINVAL;
723 goto out;
724 }
725 } else {
726 *policy = pol == &default_policy ? MPOL_DEFAULT :
727 pol->mode;
728
729
730
731
732 *policy |= (pol->flags & MPOL_MODE_FLAGS);
733 }
734
735 if (vma) {
		up_read(&current->mm->mmap_sem);
737 vma = NULL;
738 }
739
740 err = 0;
741 if (nmask)
742 get_policy_nodemask(pol, nmask);
743
744 out:
745 mpol_cond_put(pol);
746 if (vma)
		up_read(&current->mm->mmap_sem);
748 return err;
749}
750
751#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
761 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
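		/* isolate_lru_page() returns 0 on success, taking the page off the LRU so it can be migrated. */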
762 if (!isolate_lru_page(page)) {
763 list_add_tail(&page->lru, pagelist);
764 }
765 }
766}
767
768static struct page *new_node_page(struct page *page, unsigned long node, int **x)
769{
770 return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
771}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
777static int migrate_to_node(struct mm_struct *mm, int source, int dest,
778 int flags)
779{
780 nodemask_t nmask;
781 LIST_HEAD(pagelist);
782 int err = 0;
783
784 nodes_clear(nmask);
785 node_set(source, nmask);
786
787 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
788 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
789
790 if (!list_empty(&pagelist))
791 err = migrate_pages(&pagelist, new_node_page, dest);
792
793 return err;
794}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
802int do_migrate_pages(struct mm_struct *mm,
803 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
804{
805 int busy = 0;
806 int err;
807 nodemask_t tmp;
808
809 err = migrate_prep();
810 if (err)
811 return err;
812
813 down_read(&mm->mmap_sem);
814
815 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
816 if (err)
817 goto out;
818
	/*
	 * 'tmp' holds the source nodes that still have pages to move.
	 * Each pass of the while loop below picks one <source, dest>
	 * pair from the from->to remap and migrates that source node.
	 *
	 * The inner for_each_node_mask() scan prefers a pair whose dest
	 * is not itself still a pending source (i.e. not set in 'tmp'),
	 * so that we avoid pushing pages onto a node that has yet to be
	 * emptied; if no such pair exists it falls back to the last pair
	 * that moves at all (source != dest).  Nodes that map to
	 * themselves are skipped, since their pages need not move.
	 *
	 * When no source node moves anywhere (source stays -1), there is
	 * nothing left to migrate and the loop terminates.
	 */
850 tmp = *from_nodes;
851 while (!nodes_empty(tmp)) {
852 int s,d;
853 int source = -1;
854 int dest = 0;
855
856 for_each_node_mask(s, tmp) {
857 d = node_remap(s, *from_nodes, *to_nodes);
858 if (s == d)
859 continue;
860
861 source = s;
862 dest = d;
863
864
865 if (!node_isset(dest, tmp))
866 break;
867 }
868 if (source == -1)
869 break;
870
871 node_clear(source, tmp);
872 err = migrate_to_node(mm, source, dest, flags);
873 if (err > 0)
874 busy += err;
875 if (err < 0)
876 break;
877 }
878out:
879 up_read(&mm->mmap_sem);
880 if (err < 0)
881 return err;
882 return busy;
883
884}

/*
 * Allocate a new page for page migration, based on the vma policy.
 * @private points at the first vma of the mbind range; search forward
 * from there for the vma that maps @page.  This relies on the page list
 * handed to migrate_pages() being in virtual address order.
 */
893static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
894{
895 struct vm_area_struct *vma = (struct vm_area_struct *)private;
896 unsigned long uninitialized_var(address);
897
898 while (vma) {
899 address = page_address_in_vma(page, vma);
900 if (address != -EFAULT)
901 break;
902 vma = vma->vm_next;
903 }
904
905
906
907
908 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
909}
910#else
911
912static void migrate_page_add(struct page *page, struct list_head *pagelist,
913 unsigned long flags)
914{
915}
916
917int do_migrate_pages(struct mm_struct *mm,
918 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
919{
920 return -ENOSYS;
921}
922
923static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
924{
925 return NULL;
926}
927#endif
928
929static long do_mbind(unsigned long start, unsigned long len,
930 unsigned short mode, unsigned short mode_flags,
931 nodemask_t *nmask, unsigned long flags)
932{
933 struct vm_area_struct *vma;
934 struct mm_struct *mm = current->mm;
935 struct mempolicy *new;
936 unsigned long end;
937 int err;
938 LIST_HEAD(pagelist);
939
940 if (flags & ~(unsigned long)(MPOL_MF_STRICT |
941 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
942 return -EINVAL;
943 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
944 return -EPERM;
945
946 if (start & ~PAGE_MASK)
947 return -EINVAL;
948
949 if (mode == MPOL_DEFAULT)
950 flags &= ~MPOL_MF_STRICT;
951
952 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
953 end = start + len;
954
955 if (end < start)
956 return -EINVAL;
957 if (end == start)
958 return 0;
959
960 new = mpol_new(mode, mode_flags, nmask);
961 if (IS_ERR(new))
962 return PTR_ERR(new);
963
964
965
966
967
968 if (!new)
969 flags |= MPOL_MF_DISCONTIG_OK;
970
971 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
972 start, start + len, mode, mode_flags,
973 nmask ? nodes_addr(*nmask)[0] : -1);
974
975 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
976
977 err = migrate_prep();
978 if (err)
979 return err;
980 }
981 down_write(&mm->mmap_sem);
982 vma = check_range(mm, start, end, nmask,
983 flags | MPOL_MF_INVERT, &pagelist);
984
985 err = PTR_ERR(vma);
986 if (!IS_ERR(vma)) {
987 int nr_failed = 0;
988
989 err = mbind_range(vma, start, end, new);
990
991 if (!list_empty(&pagelist))
992 nr_failed = migrate_pages(&pagelist, new_vma_page,
993 (unsigned long)vma);
994
995 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
996 err = -EIO;
997 }
998
999 up_write(&mm->mmap_sem);
1000 mpol_put(new);
1001 return err;
1002}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
1009static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1010 unsigned long maxnode)
1011{
1012 unsigned long k;
1013 unsigned long nlongs;
1014 unsigned long endmask;
1015
1016 --maxnode;
1017 nodes_clear(*nodes);
1018 if (maxnode == 0 || !nmask)
1019 return 0;
1020 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1021 return -EINVAL;
1022
1023 nlongs = BITS_TO_LONGS(maxnode);
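	/* endmask selects the valid bits within the last long of the user-supplied bitmap */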
1024 if ((maxnode % BITS_PER_LONG) == 0)
1025 endmask = ~0UL;
1026 else
1027 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1028
	/*
	 * When the user specified more nodes than supported, just check
	 * that the unsupported part is all zero.
	 */
1031 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1032 if (nlongs > PAGE_SIZE/sizeof(long))
1033 return -EINVAL;
1034 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1035 unsigned long t;
1036 if (get_user(t, nmask + k))
1037 return -EFAULT;
1038 if (k == nlongs - 1) {
1039 if (t & endmask)
1040 return -EINVAL;
1041 } else if (t)
1042 return -EINVAL;
1043 }
1044 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1045 endmask = ~0UL;
1046 }
1047
1048 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1049 return -EFAULT;
1050 nodes_addr(*nodes)[nlongs-1] &= endmask;
1051 return 0;
1052}

/* Copy a kernel node mask to user space */
1055static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1056 nodemask_t *nodes)
1057{
1058 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1059 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1060
1061 if (copy > nbytes) {
1062 if (copy > PAGE_SIZE)
1063 return -EINVAL;
1064 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1065 return -EFAULT;
1066 copy = nbytes;
1067 }
1068 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1069}
1070
1071SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1072 unsigned long, mode, unsigned long __user *, nmask,
1073 unsigned long, maxnode, unsigned, flags)
1074{
1075 nodemask_t nodes;
1076 int err;
1077 unsigned short mode_flags;
1078
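	/* The optional MPOL_F_STATIC_NODES/MPOL_F_RELATIVE_NODES flags are encoded in the high bits of the mode argument. */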
1079 mode_flags = mode & MPOL_MODE_FLAGS;
1080 mode &= ~MPOL_MODE_FLAGS;
1081 if (mode >= MPOL_MAX)
1082 return -EINVAL;
1083 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1084 (mode_flags & MPOL_F_RELATIVE_NODES))
1085 return -EINVAL;
1086 err = get_nodes(&nodes, nmask, maxnode);
1087 if (err)
1088 return err;
1089 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1090}
1091
1092
1093SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1094 unsigned long, maxnode)
1095{
1096 int err;
1097 nodemask_t nodes;
1098 unsigned short flags;
1099
1100 flags = mode & MPOL_MODE_FLAGS;
1101 mode &= ~MPOL_MODE_FLAGS;
1102 if ((unsigned int)mode >= MPOL_MAX)
1103 return -EINVAL;
1104 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1105 return -EINVAL;
1106 err = get_nodes(&nodes, nmask, maxnode);
1107 if (err)
1108 return err;
1109 return do_set_mempolicy(mode, flags, &nodes);
1110}
1111
1112SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1113 const unsigned long __user *, old_nodes,
1114 const unsigned long __user *, new_nodes)
1115{
1116 const struct cred *cred = current_cred(), *tcred;
1117 struct mm_struct *mm;
1118 struct task_struct *task;
1119 nodemask_t old;
1120 nodemask_t new;
1121 nodemask_t task_nodes;
1122 int err;
1123
1124 err = get_nodes(&old, old_nodes, maxnode);
1125 if (err)
1126 return err;
1127
1128 err = get_nodes(&new, new_nodes, maxnode);
1129 if (err)
1130 return err;
1131
1132
1133 read_lock(&tasklist_lock);
1134 task = pid ? find_task_by_vpid(pid) : current;
1135 if (!task) {
1136 read_unlock(&tasklist_lock);
1137 return -ESRCH;
1138 }
1139 mm = get_task_mm(task);
1140 read_unlock(&tasklist_lock);
1141
1142 if (!mm)
1143 return -EINVAL;
1144
1145
1146
1147
1148
1149
1150
1151 rcu_read_lock();
1152 tcred = __task_cred(task);
1153 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
1154 cred->uid != tcred->suid && cred->uid != tcred->uid &&
1155 !capable(CAP_SYS_NICE)) {
1156 rcu_read_unlock();
1157 err = -EPERM;
1158 goto out;
1159 }
1160 rcu_read_unlock();
1161
1162 task_nodes = cpuset_mems_allowed(task);
1163
1164 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
1165 err = -EPERM;
1166 goto out;
1167 }
1168
1169 if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
1170 err = -EINVAL;
1171 goto out;
1172 }
1173
1174 err = security_task_movememory(task);
1175 if (err)
1176 goto out;
1177
1178 err = do_migrate_pages(mm, &old, &new,
1179 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1180out:
1181 mmput(mm);
1182 return err;
1183}
1184
1185
1186
1187SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1188 unsigned long __user *, nmask, unsigned long, maxnode,
1189 unsigned long, addr, unsigned long, flags)
1190{
1191 int err;
1192 int uninitialized_var(pval);
1193 nodemask_t nodes;
1194
1195 if (nmask != NULL && maxnode < MAX_NUMNODES)
1196 return -EINVAL;
1197
1198 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1199
1200 if (err)
1201 return err;
1202
1203 if (policy && put_user(pval, policy))
1204 return -EFAULT;
1205
1206 if (nmask)
1207 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1208
1209 return err;
1210}
1211
1212#ifdef CONFIG_COMPAT
1213
1214asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1215 compat_ulong_t __user *nmask,
1216 compat_ulong_t maxnode,
1217 compat_ulong_t addr, compat_ulong_t flags)
1218{
1219 long err;
1220 unsigned long __user *nm = NULL;
1221 unsigned long nr_bits, alloc_size;
1222 DECLARE_BITMAP(bm, MAX_NUMNODES);
1223
1224 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1225 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1226
1227 if (nmask)
1228 nm = compat_alloc_user_space(alloc_size);
1229
1230 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1231
1232 if (!err && nmask) {
1233 err = copy_from_user(bm, nm, alloc_size);
1234
1235 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1236 err |= compat_put_bitmap(nmask, bm, nr_bits);
1237 }
1238
1239 return err;
1240}
1241
1242asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1243 compat_ulong_t maxnode)
1244{
1245 long err = 0;
1246 unsigned long __user *nm = NULL;
1247 unsigned long nr_bits, alloc_size;
1248 DECLARE_BITMAP(bm, MAX_NUMNODES);
1249
1250 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1251 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1252
1253 if (nmask) {
1254 err = compat_get_bitmap(bm, nmask, nr_bits);
1255 nm = compat_alloc_user_space(alloc_size);
1256 err |= copy_to_user(nm, bm, alloc_size);
1257 }
1258
1259 if (err)
1260 return -EFAULT;
1261
1262 return sys_set_mempolicy(mode, nm, nr_bits+1);
1263}
1264
1265asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1266 compat_ulong_t mode, compat_ulong_t __user *nmask,
1267 compat_ulong_t maxnode, compat_ulong_t flags)
1268{
1269 long err = 0;
1270 unsigned long __user *nm = NULL;
1271 unsigned long nr_bits, alloc_size;
1272 nodemask_t bm;
1273
1274 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1275 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1276
1277 if (nmask) {
1278 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1279 nm = compat_alloc_user_space(alloc_size);
1280 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1281 }
1282
1283 if (err)
1284 return -EFAULT;
1285
1286 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1287}
1288
1289#endif

/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma  - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns the effective policy for a VMA at the specified address,
 * falling back to the @task policy or the system default policy.
 * Shared policies (those marked MPOL_F_SHARED) come back with an extra
 * reference that the caller must drop with mpol_cond_put().
 */
1307static struct mempolicy *get_vma_policy(struct task_struct *task,
1308 struct vm_area_struct *vma, unsigned long addr)
1309{
1310 struct mempolicy *pol = task->mempolicy;
1311
1312 if (vma) {
1313 if (vma->vm_ops && vma->vm_ops->get_policy) {
1314 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1315 addr);
1316 if (vpol)
1317 pol = vpol;
1318 } else if (vma->vm_policy)
1319 pol = vma->vm_policy;
1320 }
1321 if (!pol)
1322 pol = &default_policy;
1323 return pol;
1324}
1325

/*
 * Return a nodemask representing a mempolicy, for filtering the zonelist
 * during allocation.  Only MPOL_BIND needs one.
 */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1333 if (unlikely(policy->mode == MPOL_BIND) &&
1334 gfp_zone(gfp) >= policy_zone &&
1335 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1336 return &policy->v.nodes;
1337
1338 return NULL;
1339}

/* Return a zonelist indicated by gfp for node representing a mempolicy */
1342static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
1343{
1344 int nd = numa_node_id();
1345
1346 switch (policy->mode) {
1347 case MPOL_PREFERRED:
1348 if (!(policy->flags & MPOL_F_LOCAL))
1349 nd = policy->v.preferred_node;
1350 break;
1351 case MPOL_BIND:
		/*
		 * Normally, MPOL_BIND allocations are node-local within
		 * the allowed nodemask.  However, if __GFP_THISNODE is set
		 * and the local node is not in the mask, use the zonelist
		 * of the first node in the mask instead.
		 */
1358 if (unlikely(gfp & __GFP_THISNODE) &&
1359 unlikely(!node_isset(nd, policy->v.nodes)))
1360 nd = first_node(policy->v.nodes);
1361 break;
1362 case MPOL_INTERLEAVE:
1363 break;
1364 default:
1365 BUG();
1366 }
1367 return node_zonelist(nd, gfp);
1368}

/* Do dynamic interleaving for a process */
1371static unsigned interleave_nodes(struct mempolicy *policy)
1372{
1373 unsigned nid, next;
1374 struct task_struct *me = current;
1375
1376 nid = me->il_next;
1377 next = next_node(nid, policy->v.nodes);
1378 if (next >= MAX_NUMNODES)
1379 next = first_node(policy->v.nodes);
1380 if (next < MAX_NUMNODES)
1381 me->il_next = next;
1382 return nid;
1383}

/*
 * Depending on the memory policy, provide a node from which to allocate
 * the next slab entry.  @policy must be protected from freeing by the
 * caller; for the current task's policy that protection is implicit.
 */
1393unsigned slab_node(struct mempolicy *policy)
1394{
1395 if (!policy || policy->flags & MPOL_F_LOCAL)
1396 return numa_node_id();
1397
1398 switch (policy->mode) {
1399 case MPOL_PREFERRED:
1400
1401
1402
1403 return policy->v.preferred_node;
1404
1405 case MPOL_INTERLEAVE:
1406 return interleave_nodes(policy);
1407
1408 case MPOL_BIND: {
1409
1410
1411
1412
1413 struct zonelist *zonelist;
1414 struct zone *zone;
1415 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1416 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1417 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1418 &policy->v.nodes,
1419 &zone);
1420 return zone->node;
1421 }
1422
1423 default:
1424 BUG();
1425 }
1426}

/* Do static interleaving for a VMA with known offset. */
1429static unsigned offset_il_node(struct mempolicy *pol,
1430 struct vm_area_struct *vma, unsigned long off)
1431{
1432 unsigned nnodes = nodes_weight(pol->v.nodes);
1433 unsigned target;
1434 int c;
1435 int nid = -1;
1436
1437 if (!nnodes)
1438 return numa_node_id();
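	/* Pick the (off % nnodes)'th node that is set in the interleave mask. */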
1439 target = (unsigned int)off % nnodes;
1440 c = 0;
1441 do {
1442 nid = next_node(nid, pol->v.nodes);
1443 c++;
1444 } while (c <= target);
1445 return nid;
1446}

/* Determine a node number for interleave */
1449static inline unsigned interleave_nid(struct mempolicy *pol,
1450 struct vm_area_struct *vma, unsigned long addr, int shift)
1451{
1452 if (vma) {
1453 unsigned long off;
		/*
		 * For small pages, there is no difference between shift
		 * and PAGE_SHIFT, so the bit-shift is safe.  For huge
		 * pages, since vm_pgoff is in units of small pages, we
		 * need to shift off the always-zero bits to get a useful
		 * offset.
		 */
1462 BUG_ON(shift < PAGE_SHIFT);
1463 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1464 off += (addr - vma->vm_start) >> shift;
1465 return offset_il_node(pol, vma, off);
1466 } else
1467 return interleave_nodes(pol);
1468}
1469
1470#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol, @nodemask)
 * @vma - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup and interleave policy
 * @gfp_flags - for the requested zone
 * @mpol - pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask - pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation and a pointer
 * to the mempolicy for conditional unref after allocation.  If the
 * effective policy is MPOL_BIND, also returns the policy's nodemask for
 * filtering the zonelist.
 */
1484struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1485 gfp_t gfp_flags, struct mempolicy **mpol,
1486 nodemask_t **nodemask)
1487{
1488 struct zonelist *zl;
1489
1490 *mpol = get_vma_policy(current, vma, addr);
1491 *nodemask = NULL;
1492
1493 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1494 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1495 huge_page_shift(hstate_vma(vma))), gfp_flags);
1496 } else {
1497 zl = policy_zonelist(gfp_flags, *mpol);
1498 if ((*mpol)->mode == MPOL_BIND)
1499 *nodemask = &(*mpol)->v.nodes;
1500 }
1501 return zl;
1502}
1503#endif

/*
 * Allocate a page in interleaved policy.  Own path because it needs to
 * do special accounting (NUMA_INTERLEAVE_HIT).
 */
1507static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1508 unsigned nid)
1509{
1510 struct zonelist *zl;
1511 struct page *page;
1512
1513 zl = node_zonelist(nid, gfp);
1514 page = __alloc_pages(gfp, order, zl);
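	/* Count an interleave hit only if the page really came from the requested node. */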
1515 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1516 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1517 return page;
1518}

/**
 * alloc_page_vma - Allocate a page for a VMA.
 *
 * @gfp:  allocation flags, e.g. %GFP_USER, %GFP_KERNEL, %GFP_HIGHMEM.
 * @vma:  pointer to VMA or NULL if not available.
 * @addr: virtual address of the allocation; must be inside the VMA.
 *
 * Allocates a page from the kernel page pool and applies the NUMA policy
 * associated with the VMA or, failing that, the current process.  When
 * @vma is not NULL the caller must hold the mmap_sem of the VMA's mm for
 * reading to keep the VMA from going away.  Returns NULL when no page
 * can be allocated.
 */
1542struct page *
1543alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1544{
1545 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1546 struct zonelist *zl;
1547
1548 cpuset_update_task_memory_state();
1549
1550 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1551 unsigned nid;
1552
1553 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1554 mpol_cond_put(pol);
1555 return alloc_page_interleave(gfp, 0, nid);
1556 }
1557 zl = policy_zonelist(gfp, pol);
1558 if (unlikely(mpol_needs_cond_ref(pol))) {
1559
1560
1561
1562 struct page *page = __alloc_pages_nodemask(gfp, 0,
1563 zl, policy_nodemask(gfp, pol));
1564 __mpol_put(pol);
1565 return page;
1566 }
1567
1568
1569
1570 return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
1571}

/**
 * alloc_pages_current - Allocate pages following the current process's
 * NUMA policy.
 *
 * @gfp:   allocation flags, e.g. %GFP_USER, %GFP_KERNEL, %GFP_ATOMIC.
 * @order: power of two of the allocation size in pages; 0 is one page.
 *
 * Allocates pages from the kernel page pool.  The current process's
 * policy is applied only when not in interrupt context and when
 * __GFP_THISNODE is not set.  Returns NULL when no page can be
 * allocated.
 */
1592struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1593{
1594 struct mempolicy *pol = current->mempolicy;
1595
1596 if ((gfp & __GFP_WAIT) && !in_interrupt())
1597 cpuset_update_task_memory_state();
1598 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1599 pol = &default_policy;
1600
1601
1602
1603
1604
1605 if (pol->mode == MPOL_INTERLEAVE)
1606 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
1607 return __alloc_pages_nodemask(gfp, order,
1608 policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
1609}
1610EXPORT_SYMBOL(alloc_pages_current);

/*
 * If mpol_dup() sees that the current task's cpuset is being rebound, it
 * first rebinds the policy it is copying with the task's current
 * mems_allowed, so the duplicate stays cpuset-relative after the cpuset
 * moves.  See also kernel/cpuset.c.
 */

/* Slow path of a mempolicy duplicate */
1621struct mempolicy *__mpol_dup(struct mempolicy *old)
1622{
1623 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1624
1625 if (!new)
1626 return ERR_PTR(-ENOMEM);
1627 if (current_cpuset_is_being_rebound()) {
1628 nodemask_t mems = cpuset_mems_allowed(current);
1629 mpol_rebind_policy(old, &mems);
1630 }
1631 *new = *old;
1632 atomic_set(&new->refcnt, 1);
1633 return new;
1634}

/*
 * If @frompol needs a conditional reference (a shared policy), copy it to
 * @tompol, clear the flags that require the conditional ref, and drop the
 * extra reference.  The result can then be used without a matching
 * mpol_cond_put().  Otherwise @frompol is returned unchanged.
 */
1646struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1647 struct mempolicy *frompol)
1648{
1649 if (!mpol_needs_cond_ref(frompol))
1650 return frompol;
1651
1652 *tompol = *frompol;
1653 tompol->flags &= ~MPOL_F_SHARED;
1654 __mpol_put(frompol);
1655 return tompol;
1656}
1657
1658static int mpol_match_intent(const struct mempolicy *a,
1659 const struct mempolicy *b)
1660{
1661 if (a->flags != b->flags)
1662 return 0;
1663 if (!mpol_store_user_nodemask(a))
1664 return 1;
1665 return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
1666}

/* Slow path of a mempolicy comparison */
1669int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1670{
1671 if (!a || !b)
1672 return 0;
1673 if (a->mode != b->mode)
1674 return 0;
1675 if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
1676 return 0;
1677 switch (a->mode) {
1678 case MPOL_BIND:
1679
1680 case MPOL_INTERLEAVE:
1681 return nodes_equal(a->v.nodes, b->v.nodes);
1682 case MPOL_PREFERRED:
1683 return a->v.preferred_node == b->v.preferred_node &&
1684 a->flags == b->flags;
1685 default:
1686 BUG();
1687 return 0;
1688 }
1689}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in a red-black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */

/* lookup first element intersecting start-end */
/* Caller holds sp->lock */
1702static struct sp_node *
1703sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1704{
1705 struct rb_node *n = sp->root.rb_node;
1706
1707 while (n) {
1708 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1709
1710 if (start >= p->end)
1711 n = n->rb_right;
1712 else if (end <= p->start)
1713 n = n->rb_left;
1714 else
1715 break;
1716 }
1717 if (!n)
1718 return NULL;
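	/* Walk back to the lowest-starting node that still intersects [start, end). */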
1719 for (;;) {
1720 struct sp_node *w = NULL;
1721 struct rb_node *prev = rb_prev(n);
1722 if (!prev)
1723 break;
1724 w = rb_entry(prev, struct sp_node, nd);
1725 if (w->end <= start)
1726 break;
1727 n = prev;
1728 }
1729 return rb_entry(n, struct sp_node, nd);
1730}

/*
 * Insert a new shared policy into the tree.  Caller holds sp->lock.
 */
1734static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1735{
1736 struct rb_node **p = &sp->root.rb_node;
1737 struct rb_node *parent = NULL;
1738 struct sp_node *nd;
1739
1740 while (*p) {
1741 parent = *p;
1742 nd = rb_entry(parent, struct sp_node, nd);
1743 if (new->start < nd->start)
1744 p = &(*p)->rb_left;
1745 else if (new->end > nd->end)
1746 p = &(*p)->rb_right;
1747 else
1748 BUG();
1749 }
1750 rb_link_node(&new->nd, parent, p);
1751 rb_insert_color(&new->nd, &sp->root);
1752 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
1753 new->policy ? new->policy->mode : 0);
1754}

/* Find shared policy intersecting idx */
1757struct mempolicy *
1758mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1759{
1760 struct mempolicy *pol = NULL;
1761 struct sp_node *sn;
1762
1763 if (!sp->root.rb_node)
1764 return NULL;
1765 spin_lock(&sp->lock);
1766 sn = sp_lookup(sp, idx, idx+1);
1767 if (sn) {
1768 mpol_get(sn->policy);
1769 pol = sn->policy;
1770 }
1771 spin_unlock(&sp->lock);
1772 return pol;
1773}
1774
1775static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1776{
1777 pr_debug("deleting %lx-l%lx\n", n->start, n->end);
1778 rb_erase(&n->nd, &sp->root);
1779 mpol_put(n->policy);
1780 kmem_cache_free(sn_cache, n);
1781}
1782
1783static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1784 struct mempolicy *pol)
1785{
1786 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1787
1788 if (!n)
1789 return NULL;
1790 n->start = start;
1791 n->end = end;
1792 mpol_get(pol);
1793 pol->flags |= MPOL_F_SHARED;
1794 n->policy = pol;
1795 return n;
1796}

/* Replace a policy range. */
1799static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1800 unsigned long end, struct sp_node *new)
1801{
1802 struct sp_node *n, *new2 = NULL;
1803
1804restart:
1805 spin_lock(&sp->lock);
1806 n = sp_lookup(sp, start, end);
1807
1808 while (n && n->start < end) {
1809 struct rb_node *next = rb_next(&n->nd);
1810 if (n->start >= start) {
1811 if (n->end <= end)
1812 sp_delete(sp, n);
1813 else
1814 n->start = end;
1815 } else {
1816
1817 if (n->end > end) {
1818 if (!new2) {
1819 spin_unlock(&sp->lock);
1820 new2 = sp_alloc(end, n->end, n->policy);
1821 if (!new2)
1822 return -ENOMEM;
1823 goto restart;
1824 }
1825 n->end = start;
1826 sp_insert(sp, new2);
1827 new2 = NULL;
1828 break;
1829 } else
1830 n->end = start;
1831 }
1832 if (!next)
1833 break;
1834 n = rb_entry(next, struct sp_node, nd);
1835 }
1836 if (new)
1837 sp_insert(sp, new);
1838 spin_unlock(&sp->lock);
1839 if (new2) {
1840 mpol_put(new2->policy);
1841 kmem_cache_free(sn_cache, new2);
1842 }
1843 return 0;
1844}

/**
 * mpol_shared_policy_init - initialize shared policy for inode
 * @sp: pointer to inode shared policy
 * @mpol: struct mempolicy to install
 *
 * Install non-NULL @mpol in the inode's shared policy rb-tree.
 * On entry, the current task has a reference on a non-NULL @mpol;
 * that reference is dropped here.
 */
1855void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
1856{
1857 sp->root = RB_ROOT;
1858 spin_lock_init(&sp->lock);
1859
1860 if (mpol) {
1861 struct vm_area_struct pvma;
1862 struct mempolicy *new;
1863
1864
1865 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
1866 mpol_put(mpol);
1867 if (IS_ERR(new))
1868 return;
1869
1870
1871 memset(&pvma, 0, sizeof(struct vm_area_struct));
1872 pvma.vm_end = TASK_SIZE;
1873 mpol_set_shared_policy(sp, &pvma, new);
1874 mpol_put(new);
1875 }
1876}
1877
1878int mpol_set_shared_policy(struct shared_policy *info,
1879 struct vm_area_struct *vma, struct mempolicy *npol)
1880{
1881 int err;
1882 struct sp_node *new = NULL;
1883 unsigned long sz = vma_pages(vma);
1884
1885 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1886 vma->vm_pgoff,
1887 sz, npol ? npol->mode : -1,
1888 npol ? npol->flags : -1,
1889 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1890
1891 if (npol) {
1892 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1893 if (!new)
1894 return -ENOMEM;
1895 }
1896 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1897 if (err && new)
1898 kmem_cache_free(sn_cache, new);
1899 return err;
1900}

/* Free a backing policy store on inode delete. */
1903void mpol_free_shared_policy(struct shared_policy *p)
1904{
1905 struct sp_node *n;
1906 struct rb_node *next;
1907
1908 if (!p->root.rb_node)
1909 return;
1910 spin_lock(&p->lock);
1911 next = rb_first(&p->root);
1912 while (next) {
1913 n = rb_entry(next, struct sp_node, nd);
1914 next = rb_next(&n->nd);
1915 rb_erase(&n->nd, &p->root);
1916 mpol_put(n->policy);
1917 kmem_cache_free(sn_cache, n);
1918 }
1919 spin_unlock(&p->lock);
1920}
1921
1922
1923void __init numa_policy_init(void)
1924{
1925 nodemask_t interleave_nodes;
1926 unsigned long largest = 0;
1927 int nid, prefer = 0;
1928
1929 policy_cache = kmem_cache_create("numa_policy",
1930 sizeof(struct mempolicy),
1931 0, SLAB_PANIC, NULL);
1932
1933 sn_cache = kmem_cache_create("shared_policy_node",
1934 sizeof(struct sp_node),
1935 0, SLAB_PANIC, NULL);

	/*
	 * Set interleaving policy for system init.  Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), or
	 * falls back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? (node must have at least 16MB) */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All nodes too small: interleave over just the largest one */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);
1960
1961 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
1962 printk("numa_policy_init: interleaving failed\n");
1963}

/* Reset policy of the calling process to the system default */
1966void numa_default_policy(void)
1967{
1968 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1969}

/*
 * Parse and format mempolicy from/to strings
 */

/*
 * "local" is a pseudo-policy: MPOL_PREFERRED with the MPOL_F_LOCAL flag.
 * Used only for mpol_parse_str() and mpol_to_str().
 */
1979#define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
1980static const char * const policy_types[] =
1981 { "default", "prefer", "bind", "interleave", "local" };
1982
1983
1984#ifdef CONFIG_TMPFS

/**
 * mpol_parse_str - parse a string to a mempolicy
 * @str:  string containing the mempolicy to parse
 * @mpol: pointer to a struct mempolicy pointer, returned on success
 * @no_context: if true, save the raw nodemask in w.user_nodemask rather
 *	than contextualizing it now (used for tmpfs mpol mount options)
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
 * On success, returns 0, else 1.
 */
2003int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2004{
2005 struct mempolicy *new = NULL;
2006 unsigned short uninitialized_var(mode);
2007 unsigned short uninitialized_var(mode_flags);
2008 nodemask_t nodes;
2009 char *nodelist = strchr(str, ':');
2010 char *flags = strchr(str, '=');
2011 int i;
2012 int err = 1;
2013
2014 if (nodelist) {
2015
2016 *nodelist++ = '\0';
2017 if (nodelist_parse(nodelist, nodes))
2018 goto out;
2019 if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2020 goto out;
2021 } else
2022 nodes_clear(nodes);
2023
2024 if (flags)
2025 *flags++ = '\0';
2026
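	/* Match the mode name against the policy_types[] table */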
2027 for (i = 0; i <= MPOL_LOCAL; i++) {
2028 if (!strcmp(str, policy_types[i])) {
2029 mode = i;
2030 break;
2031 }
2032 }
2033 if (i > MPOL_LOCAL)
2034 goto out;
2035
2036 switch (mode) {
2037 case MPOL_PREFERRED:
2038
2039
2040
2041 if (nodelist) {
2042 char *rest = nodelist;
2043 while (isdigit(*rest))
2044 rest++;
2045 if (!*rest)
2046 err = 0;
2047 }
2048 break;
2049 case MPOL_INTERLEAVE:
2050
2051
2052
2053 if (!nodelist)
2054 nodes = node_states[N_HIGH_MEMORY];
2055 err = 0;
2056 break;
2057 case MPOL_LOCAL:
2058
2059
2060
2061 if (nodelist)
2062 goto out;
2063 mode = MPOL_PREFERRED;
2064 break;
2065
2066
2067
2068
2069
2070 }
2071
2072 mode_flags = 0;
2073 if (flags) {
2074
2075
2076
2077
2078 if (!strcmp(flags, "static"))
2079 mode_flags |= MPOL_F_STATIC_NODES;
2080 else if (!strcmp(flags, "relative"))
2081 mode_flags |= MPOL_F_RELATIVE_NODES;
2082 else
2083 err = 1;
2084 }
2085
2086 new = mpol_new(mode, mode_flags, &nodes);
2087 if (IS_ERR(new))
2088 err = 1;
2089 else if (no_context)
2090 new->w.user_nodemask = nodes;
2091
2092out:
2093
2094 if (nodelist)
2095 *--nodelist = ':';
2096 if (flags)
2097 *--flags = '=';
2098 if (!err)
2099 *mpol = new;
2100 return err;
2101}
2102#endif

/*
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer: buffer to receive the formatted string
 * @maxlen: length of @buffer
 * @pol: mempolicy to be formatted
 * @no_context: if true, format the raw user nodemask (w.user_nodemask)
 *	instead of the cpuset-contextualized one
 *
 * Converts a mempolicy into a string.
 * Returns the number of characters in the buffer if positive,
 * or an error (negative).
 */
2115int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2116{
2117 char *p = buffer;
2118 int l;
2119 nodemask_t nodes;
2120 unsigned short mode;
2121 unsigned short flags = pol ? pol->flags : 0;
2122
2123
2124
2125
2126 VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2127
2128 if (!pol || pol == &default_policy)
2129 mode = MPOL_DEFAULT;
2130 else
2131 mode = pol->mode;
2132
2133 switch (mode) {
2134 case MPOL_DEFAULT:
2135 nodes_clear(nodes);
2136 break;
2137
2138 case MPOL_PREFERRED:
2139 nodes_clear(nodes);
2140 if (flags & MPOL_F_LOCAL)
2141 mode = MPOL_LOCAL;
2142 else
2143 node_set(pol->v.preferred_node, nodes);
2144 break;
2145
2146 case MPOL_BIND:
2147
2148 case MPOL_INTERLEAVE:
2149 if (no_context)
2150 nodes = pol->w.user_nodemask;
2151 else
2152 nodes = pol->v.nodes;
2153 break;
2154
2155 default:
2156 BUG();
2157 }
2158
2159 l = strlen(policy_types[mode]);
2160 if (buffer + maxlen < p + l + 1)
2161 return -ENOSPC;
2162
2163 strcpy(p, policy_types[mode]);
2164 p += l;
2165
2166 if (flags & MPOL_MODE_FLAGS) {
2167 if (buffer + maxlen < p + 2)
2168 return -ENOSPC;
2169 *p++ = '=';
2170
2171
2172
2173
2174 if (flags & MPOL_F_STATIC_NODES)
2175 p += snprintf(p, buffer + maxlen - p, "static");
2176 else if (flags & MPOL_F_RELATIVE_NODES)
2177 p += snprintf(p, buffer + maxlen - p, "relative");
2178 }
2179
2180 if (!nodes_empty(nodes)) {
2181 if (buffer + maxlen < p + 2)
2182 return -ENOSPC;
2183 *p++ = ':';
2184 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2185 }
2186 return p - buffer;
2187}
2188
2189struct numa_maps {
2190 unsigned long pages;
2191 unsigned long anon;
2192 unsigned long active;
2193 unsigned long writeback;
2194 unsigned long mapcount_max;
2195 unsigned long dirty;
2196 unsigned long swapcache;
2197 unsigned long node[MAX_NUMNODES];
2198};
2199
2200static void gather_stats(struct page *page, void *private, int pte_dirty)
2201{
2202 struct numa_maps *md = private;
2203 int count = page_mapcount(page);
2204
2205 md->pages++;
2206 if (pte_dirty || PageDirty(page))
2207 md->dirty++;
2208
2209 if (PageSwapCache(page))
2210 md->swapcache++;
2211
2212 if (PageActive(page) || PageUnevictable(page))
2213 md->active++;
2214
2215 if (PageWriteback(page))
2216 md->writeback++;
2217
2218 if (PageAnon(page))
2219 md->anon++;
2220
2221 if (count > md->mapcount_max)
2222 md->mapcount_max = count;
2223
2224 md->node[page_to_nid(page)]++;
2225}
2226
2227#ifdef CONFIG_HUGETLB_PAGE
2228static void check_huge_range(struct vm_area_struct *vma,
2229 unsigned long start, unsigned long end,
2230 struct numa_maps *md)
2231{
2232 unsigned long addr;
2233 struct page *page;
2234 struct hstate *h = hstate_vma(vma);
2235 unsigned long sz = huge_page_size(h);
2236
2237 for (addr = start; addr < end; addr += sz) {
2238 pte_t *ptep = huge_pte_offset(vma->vm_mm,
2239 addr & huge_page_mask(h));
2240 pte_t pte;
2241
2242 if (!ptep)
2243 continue;
2244
2245 pte = *ptep;
2246 if (pte_none(pte))
2247 continue;
2248
2249 page = pte_page(pte);
2250 if (!page)
2251 continue;
2252
2253 gather_stats(page, md, pte_dirty(*ptep));
2254 }
2255}
2256#else
2257static inline void check_huge_range(struct vm_area_struct *vma,
2258 unsigned long start, unsigned long end,
2259 struct numa_maps *md)
2260{
2261}
2262#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
2267int show_numa_map(struct seq_file *m, void *v)
2268{
2269 struct proc_maps_private *priv = m->private;
2270 struct vm_area_struct *vma = v;
2271 struct numa_maps *md;
2272 struct file *file = vma->vm_file;
2273 struct mm_struct *mm = vma->vm_mm;
2274 struct mempolicy *pol;
2275 int n;
2276 char buffer[50];
2277
2278 if (!mm)
2279 return 0;
2280
2281 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
2282 if (!md)
2283 return 0;
2284
2285 pol = get_vma_policy(priv->task, vma, vma->vm_start);
2286 mpol_to_str(buffer, sizeof(buffer), pol, 0);
2287 mpol_cond_put(pol);
2288
2289 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2290
2291 if (file) {
2292 seq_printf(m, " file=");
2293 seq_path(m, &file->f_path, "\n\t= ");
2294 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2295 seq_printf(m, " heap");
2296 } else if (vma->vm_start <= mm->start_stack &&
2297 vma->vm_end >= mm->start_stack) {
2298 seq_printf(m, " stack");
2299 }
2300
2301 if (is_vm_hugetlb_page(vma)) {
2302 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2303 seq_printf(m, " huge");
2304 } else {
2305 check_pgd_range(vma, vma->vm_start, vma->vm_end,
2306 &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2307 }
2308
2309 if (!md->pages)
2310 goto out;
2311
2312 if (md->anon)
2313 seq_printf(m," anon=%lu",md->anon);
2314
2315 if (md->dirty)
2316 seq_printf(m," dirty=%lu",md->dirty);
2317
2318 if (md->pages != md->anon && md->pages != md->dirty)
2319 seq_printf(m, " mapped=%lu", md->pages);
2320
2321 if (md->mapcount_max > 1)
2322 seq_printf(m, " mapmax=%lu", md->mapcount_max);
2323
2324 if (md->swapcache)
2325 seq_printf(m," swapcache=%lu", md->swapcache);
2326
2327 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2328 seq_printf(m," active=%lu", md->active);
2329
2330 if (md->writeback)
2331 seq_printf(m," writeback=%lu", md->writeback);
2332
2333 for_each_node_state(n, N_HIGH_MEMORY)
2334 if (md->node[n])
2335 seq_printf(m, " N%d=%lu", n, md->node[n]);
2336out:
2337 seq_putc(m, '\n');
2338 kfree(md);
2339
2340 if (m->count < m->size)
2341 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
2342 return 0;
2343}
2344